/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ // automatically generated by the FlatBuffers compiler, do not modify #ifndef FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_ #define FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_ #include "flatbuffers/flatbuffers.h" namespace tflite { struct ComputeSettings; struct ComputeSettingsBuilder; struct ComputeSettingsT; struct NNAPISettings; struct NNAPISettingsBuilder; struct NNAPISettingsT; struct GPUSettings; struct GPUSettingsBuilder; struct GPUSettingsT; struct HexagonSettings; struct HexagonSettingsBuilder; struct HexagonSettingsT; struct XNNPackSettings; struct XNNPackSettingsBuilder; struct XNNPackSettingsT; struct CoreMLSettings; struct CoreMLSettingsBuilder; struct CoreMLSettingsT; struct EdgeTpuDeviceSpec; struct EdgeTpuDeviceSpecBuilder; struct EdgeTpuDeviceSpecT; struct EdgeTpuInactivePowerConfig; struct EdgeTpuInactivePowerConfigBuilder; struct EdgeTpuInactivePowerConfigT; struct EdgeTpuSettings; struct EdgeTpuSettingsBuilder; struct EdgeTpuSettingsT; struct CoralSettings; struct CoralSettingsBuilder; struct CoralSettingsT; struct CPUSettings; struct CPUSettingsBuilder; struct CPUSettingsT; struct TFLiteSettings; struct TFLiteSettingsBuilder; struct TFLiteSettingsT; struct FallbackSettings; struct FallbackSettingsBuilder; struct FallbackSettingsT; struct BenchmarkMetric; struct BenchmarkMetricBuilder; struct 
BenchmarkMetricT; struct BenchmarkResult; struct BenchmarkResultBuilder; struct BenchmarkResultT; struct ErrorCode; struct ErrorCodeBuilder; struct ErrorCodeT; struct BenchmarkError; struct BenchmarkErrorBuilder; struct BenchmarkErrorT; struct BenchmarkEvent; struct BenchmarkEventBuilder; struct BenchmarkEventT; struct BestAccelerationDecision; struct BestAccelerationDecisionBuilder; struct BestAccelerationDecisionT; struct BenchmarkInitializationFailure; struct BenchmarkInitializationFailureBuilder; struct BenchmarkInitializationFailureT; struct MiniBenchmarkEvent; struct MiniBenchmarkEventBuilder; struct MiniBenchmarkEventT; struct ModelFile; struct ModelFileBuilder; struct ModelFileT; struct BenchmarkStoragePaths; struct BenchmarkStoragePathsBuilder; struct BenchmarkStoragePathsT; struct MinibenchmarkSettings; struct MinibenchmarkSettingsBuilder; struct MinibenchmarkSettingsT; bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs); bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs); bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs); bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs); bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs); bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs); bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs); bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs); bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs); bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs); bool operator==(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs); bool operator!=(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs); bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs); bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs); bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const 
EdgeTpuInactivePowerConfigT &rhs); bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs); bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs); bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs); bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs); bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs); bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs); bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs); bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs); bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs); bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs); bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs); bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs); bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs); bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs); bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs); bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs); bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs); bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs); bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs); bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs); bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs); bool operator==(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs); bool operator!=(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs); bool operator==(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs); bool operator!=(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs); bool operator==(const 
MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs); bool operator!=(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs); bool operator==(const ModelFileT &lhs, const ModelFileT &rhs); bool operator!=(const ModelFileT &lhs, const ModelFileT &rhs); bool operator==(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs); bool operator!=(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs); bool operator==(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs); bool operator!=(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs); enum ExecutionPreference : int32_t { ExecutionPreference_ANY = 0, ExecutionPreference_LOW_LATENCY = 1, ExecutionPreference_LOW_POWER = 2, ExecutionPreference_FORCE_CPU = 3, ExecutionPreference_MIN = ExecutionPreference_ANY, ExecutionPreference_MAX = ExecutionPreference_FORCE_CPU }; inline const ExecutionPreference (&EnumValuesExecutionPreference())[4] { static const ExecutionPreference values[] = { ExecutionPreference_ANY, ExecutionPreference_LOW_LATENCY, ExecutionPreference_LOW_POWER, ExecutionPreference_FORCE_CPU }; return values; } inline const char * const *EnumNamesExecutionPreference() { static const char * const names[5] = { "ANY", "LOW_LATENCY", "LOW_POWER", "FORCE_CPU", nullptr }; return names; } inline const char *EnumNameExecutionPreference(ExecutionPreference e) { if (flatbuffers::IsOutRange(e, ExecutionPreference_ANY, ExecutionPreference_FORCE_CPU)) return ""; const size_t index = static_cast(e); return EnumNamesExecutionPreference()[index]; } enum Delegate : int32_t { Delegate_NONE = 0, Delegate_NNAPI = 1, Delegate_GPU = 2, Delegate_HEXAGON = 3, Delegate_XNNPACK = 4, Delegate_EDGETPU = 5, Delegate_EDGETPU_CORAL = 6, Delegate_CORE_ML = 7, Delegate_MIN = Delegate_NONE, Delegate_MAX = Delegate_CORE_ML }; inline const Delegate (&EnumValuesDelegate())[8] { static const Delegate values[] = { Delegate_NONE, Delegate_NNAPI, Delegate_GPU, Delegate_HEXAGON, 
Delegate_XNNPACK, Delegate_EDGETPU, Delegate_EDGETPU_CORAL, Delegate_CORE_ML }; return values; } inline const char * const *EnumNamesDelegate() { static const char * const names[9] = { "NONE", "NNAPI", "GPU", "HEXAGON", "XNNPACK", "EDGETPU", "EDGETPU_CORAL", "CORE_ML", nullptr }; return names; } inline const char *EnumNameDelegate(Delegate e) { if (flatbuffers::IsOutRange(e, Delegate_NONE, Delegate_CORE_ML)) return ""; const size_t index = static_cast(e); return EnumNamesDelegate()[index]; } enum NNAPIExecutionPreference : int32_t { NNAPIExecutionPreference_UNDEFINED = 0, NNAPIExecutionPreference_NNAPI_LOW_POWER = 1, NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER = 2, NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED = 3, NNAPIExecutionPreference_MIN = NNAPIExecutionPreference_UNDEFINED, NNAPIExecutionPreference_MAX = NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED }; inline const NNAPIExecutionPreference (&EnumValuesNNAPIExecutionPreference())[4] { static const NNAPIExecutionPreference values[] = { NNAPIExecutionPreference_UNDEFINED, NNAPIExecutionPreference_NNAPI_LOW_POWER, NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER, NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED }; return values; } inline const char * const *EnumNamesNNAPIExecutionPreference() { static const char * const names[5] = { "UNDEFINED", "NNAPI_LOW_POWER", "NNAPI_FAST_SINGLE_ANSWER", "NNAPI_SUSTAINED_SPEED", nullptr }; return names; } inline const char *EnumNameNNAPIExecutionPreference(NNAPIExecutionPreference e) { if (flatbuffers::IsOutRange(e, NNAPIExecutionPreference_UNDEFINED, NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED)) return ""; const size_t index = static_cast(e); return EnumNamesNNAPIExecutionPreference()[index]; } enum NNAPIExecutionPriority : int32_t { NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED = 0, NNAPIExecutionPriority_NNAPI_PRIORITY_LOW = 1, NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM = 2, NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH = 3, NNAPIExecutionPriority_MIN = 
NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, NNAPIExecutionPriority_MAX = NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH }; inline const NNAPIExecutionPriority (&EnumValuesNNAPIExecutionPriority())[4] { static const NNAPIExecutionPriority values[] = { NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, NNAPIExecutionPriority_NNAPI_PRIORITY_LOW, NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM, NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH }; return values; } inline const char * const *EnumNamesNNAPIExecutionPriority() { static const char * const names[5] = { "NNAPI_PRIORITY_UNDEFINED", "NNAPI_PRIORITY_LOW", "NNAPI_PRIORITY_MEDIUM", "NNAPI_PRIORITY_HIGH", nullptr }; return names; } inline const char *EnumNameNNAPIExecutionPriority(NNAPIExecutionPriority e) { if (flatbuffers::IsOutRange(e, NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH)) return ""; const size_t index = static_cast(e); return EnumNamesNNAPIExecutionPriority()[index]; } enum GPUBackend : int32_t { GPUBackend_UNSET = 0, GPUBackend_OPENCL = 1, GPUBackend_OPENGL = 2, GPUBackend_MIN = GPUBackend_UNSET, GPUBackend_MAX = GPUBackend_OPENGL }; inline const GPUBackend (&EnumValuesGPUBackend())[3] { static const GPUBackend values[] = { GPUBackend_UNSET, GPUBackend_OPENCL, GPUBackend_OPENGL }; return values; } inline const char * const *EnumNamesGPUBackend() { static const char * const names[4] = { "UNSET", "OPENCL", "OPENGL", nullptr }; return names; } inline const char *EnumNameGPUBackend(GPUBackend e) { if (flatbuffers::IsOutRange(e, GPUBackend_UNSET, GPUBackend_OPENGL)) return ""; const size_t index = static_cast(e); return EnumNamesGPUBackend()[index]; } enum GPUInferencePriority : int32_t { GPUInferencePriority_GPU_PRIORITY_AUTO = 0, GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION = 1, GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY = 2, GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE = 3, GPUInferencePriority_MIN = GPUInferencePriority_GPU_PRIORITY_AUTO, 
GPUInferencePriority_MAX = GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE }; inline const GPUInferencePriority (&EnumValuesGPUInferencePriority())[4] { static const GPUInferencePriority values[] = { GPUInferencePriority_GPU_PRIORITY_AUTO, GPUInferencePriority_GPU_PRIORITY_MAX_PRECISION, GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY, GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE }; return values; } inline const char * const *EnumNamesGPUInferencePriority() { static const char * const names[5] = { "GPU_PRIORITY_AUTO", "GPU_PRIORITY_MAX_PRECISION", "GPU_PRIORITY_MIN_LATENCY", "GPU_PRIORITY_MIN_MEMORY_USAGE", nullptr }; return names; } inline const char *EnumNameGPUInferencePriority(GPUInferencePriority e) { if (flatbuffers::IsOutRange(e, GPUInferencePriority_GPU_PRIORITY_AUTO, GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE)) return ""; const size_t index = static_cast(e); return EnumNamesGPUInferencePriority()[index]; } enum GPUInferenceUsage : int32_t { GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER = 0, GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED = 1, GPUInferenceUsage_MIN = GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER, GPUInferenceUsage_MAX = GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED }; inline const GPUInferenceUsage (&EnumValuesGPUInferenceUsage())[2] { static const GPUInferenceUsage values[] = { GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER, GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED }; return values; } inline const char * const *EnumNamesGPUInferenceUsage() { static const char * const names[3] = { "GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER", "GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED", nullptr }; return names; } inline const char *EnumNameGPUInferenceUsage(GPUInferenceUsage e) { if (flatbuffers::IsOutRange(e, GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER, GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED)) return ""; const size_t 
index = static_cast(e); return EnumNamesGPUInferenceUsage()[index]; } enum XNNPackFlags : int32_t { XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS = 0, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8 = 1, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8 = 2, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8 = 3, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16 = 4, XNNPackFlags_MIN = XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS, XNNPackFlags_MAX = XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16 }; inline const XNNPackFlags (&EnumValuesXNNPackFlags())[5] { static const XNNPackFlags values[] = { XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QU8, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16 }; return values; } inline const char * const *EnumNamesXNNPackFlags() { static const char * const names[6] = { "TFLITE_XNNPACK_DELEGATE_NO_FLAGS", "TFLITE_XNNPACK_DELEGATE_FLAG_QS8", "TFLITE_XNNPACK_DELEGATE_FLAG_QU8", "TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8", "TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16", nullptr }; return names; } inline const char *EnumNameXNNPackFlags(XNNPackFlags e) { if (flatbuffers::IsOutRange(e, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS, XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_FORCE_FP16)) return ""; const size_t index = static_cast(e); return EnumNamesXNNPackFlags()[index]; } namespace CoreMLSettings_ { enum EnabledDevices : int32_t { EnabledDevices_DEVICES_ALL = 0, EnabledDevices_DEVICES_WITH_NEURAL_ENGINE = 1, EnabledDevices_MIN = EnabledDevices_DEVICES_ALL, EnabledDevices_MAX = EnabledDevices_DEVICES_WITH_NEURAL_ENGINE }; inline const EnabledDevices (&EnumValuesEnabledDevices())[2] { static const EnabledDevices values[] = { EnabledDevices_DEVICES_ALL, EnabledDevices_DEVICES_WITH_NEURAL_ENGINE }; return values; } inline const char * const *EnumNamesEnabledDevices() { static const char * const names[3] = 
{ "DEVICES_ALL", "DEVICES_WITH_NEURAL_ENGINE", nullptr }; return names; } inline const char *EnumNameEnabledDevices(EnabledDevices e) { if (flatbuffers::IsOutRange(e, EnabledDevices_DEVICES_ALL, EnabledDevices_DEVICES_WITH_NEURAL_ENGINE)) return ""; const size_t index = static_cast(e); return EnumNamesEnabledDevices()[index]; } } // namespace CoreMLSettings_ namespace EdgeTpuDeviceSpec_ { enum PlatformType : int32_t { PlatformType_MMIO = 0, PlatformType_REFERENCE = 1, PlatformType_SIMULATOR = 2, PlatformType_REMOTE_SIMULATOR = 3, PlatformType_MIN = PlatformType_MMIO, PlatformType_MAX = PlatformType_REMOTE_SIMULATOR }; inline const PlatformType (&EnumValuesPlatformType())[4] { static const PlatformType values[] = { PlatformType_MMIO, PlatformType_REFERENCE, PlatformType_SIMULATOR, PlatformType_REMOTE_SIMULATOR }; return values; } inline const char * const *EnumNamesPlatformType() { static const char * const names[5] = { "MMIO", "REFERENCE", "SIMULATOR", "REMOTE_SIMULATOR", nullptr }; return names; } inline const char *EnumNamePlatformType(PlatformType e) { if (flatbuffers::IsOutRange(e, PlatformType_MMIO, PlatformType_REMOTE_SIMULATOR)) return ""; const size_t index = static_cast(e); return EnumNamesPlatformType()[index]; } } // namespace EdgeTpuDeviceSpec_ enum EdgeTpuPowerState : int32_t { EdgeTpuPowerState_UNDEFINED_POWERSTATE = 0, EdgeTpuPowerState_TPU_CORE_OFF = 1, EdgeTpuPowerState_READY = 2, EdgeTpuPowerState_ACTIVE_MIN_POWER = 3, EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER = 4, EdgeTpuPowerState_ACTIVE_LOW_POWER = 5, EdgeTpuPowerState_ACTIVE = 6, EdgeTpuPowerState_OVER_DRIVE = 7, EdgeTpuPowerState_MIN = EdgeTpuPowerState_UNDEFINED_POWERSTATE, EdgeTpuPowerState_MAX = EdgeTpuPowerState_OVER_DRIVE }; inline const EdgeTpuPowerState (&EnumValuesEdgeTpuPowerState())[8] { static const EdgeTpuPowerState values[] = { EdgeTpuPowerState_UNDEFINED_POWERSTATE, EdgeTpuPowerState_TPU_CORE_OFF, EdgeTpuPowerState_READY, EdgeTpuPowerState_ACTIVE_MIN_POWER, 
EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER, EdgeTpuPowerState_ACTIVE_LOW_POWER, EdgeTpuPowerState_ACTIVE, EdgeTpuPowerState_OVER_DRIVE }; return values; } inline const char * const *EnumNamesEdgeTpuPowerState() { static const char * const names[9] = { "UNDEFINED_POWERSTATE", "TPU_CORE_OFF", "READY", "ACTIVE_MIN_POWER", "ACTIVE_VERY_LOW_POWER", "ACTIVE_LOW_POWER", "ACTIVE", "OVER_DRIVE", nullptr }; return names; } inline const char *EnumNameEdgeTpuPowerState(EdgeTpuPowerState e) { if (flatbuffers::IsOutRange(e, EdgeTpuPowerState_UNDEFINED_POWERSTATE, EdgeTpuPowerState_OVER_DRIVE)) return ""; const size_t index = static_cast(e); return EnumNamesEdgeTpuPowerState()[index]; } namespace EdgeTpuSettings_ { enum FloatTruncationType : int32_t { FloatTruncationType_UNSPECIFIED = 0, FloatTruncationType_NO_TRUNCATION = 1, FloatTruncationType_BFLOAT16 = 2, FloatTruncationType_HALF = 3, FloatTruncationType_MIN = FloatTruncationType_UNSPECIFIED, FloatTruncationType_MAX = FloatTruncationType_HALF }; inline const FloatTruncationType (&EnumValuesFloatTruncationType())[4] { static const FloatTruncationType values[] = { FloatTruncationType_UNSPECIFIED, FloatTruncationType_NO_TRUNCATION, FloatTruncationType_BFLOAT16, FloatTruncationType_HALF }; return values; } inline const char * const *EnumNamesFloatTruncationType() { static const char * const names[5] = { "UNSPECIFIED", "NO_TRUNCATION", "BFLOAT16", "HALF", nullptr }; return names; } inline const char *EnumNameFloatTruncationType(FloatTruncationType e) { if (flatbuffers::IsOutRange(e, FloatTruncationType_UNSPECIFIED, FloatTruncationType_HALF)) return ""; const size_t index = static_cast(e); return EnumNamesFloatTruncationType()[index]; } enum QosClass : int32_t { QosClass_QOS_UNDEFINED = 0, QosClass_BEST_EFFORT = 1, QosClass_REALTIME = 2, QosClass_MIN = QosClass_QOS_UNDEFINED, QosClass_MAX = QosClass_REALTIME }; inline const QosClass (&EnumValuesQosClass())[3] { static const QosClass values[] = { QosClass_QOS_UNDEFINED, 
QosClass_BEST_EFFORT, QosClass_REALTIME }; return values; } inline const char * const *EnumNamesQosClass() { static const char * const names[4] = { "QOS_UNDEFINED", "BEST_EFFORT", "REALTIME", nullptr }; return names; } inline const char *EnumNameQosClass(QosClass e) { if (flatbuffers::IsOutRange(e, QosClass_QOS_UNDEFINED, QosClass_REALTIME)) return ""; const size_t index = static_cast(e); return EnumNamesQosClass()[index]; } } // namespace EdgeTpuSettings_ namespace CoralSettings_ { enum Performance : int32_t { Performance_UNDEFINED = 0, Performance_MAXIMUM = 1, Performance_HIGH = 2, Performance_MEDIUM = 3, Performance_LOW = 4, Performance_MIN = Performance_UNDEFINED, Performance_MAX = Performance_LOW }; inline const Performance (&EnumValuesPerformance())[5] { static const Performance values[] = { Performance_UNDEFINED, Performance_MAXIMUM, Performance_HIGH, Performance_MEDIUM, Performance_LOW }; return values; } inline const char * const *EnumNamesPerformance() { static const char * const names[6] = { "UNDEFINED", "MAXIMUM", "HIGH", "MEDIUM", "LOW", nullptr }; return names; } inline const char *EnumNamePerformance(Performance e) { if (flatbuffers::IsOutRange(e, Performance_UNDEFINED, Performance_LOW)) return ""; const size_t index = static_cast(e); return EnumNamesPerformance()[index]; } } // namespace CoralSettings_ enum BenchmarkEventType : int32_t { BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE = 0, BenchmarkEventType_START = 1, BenchmarkEventType_END = 2, BenchmarkEventType_ERROR = 3, BenchmarkEventType_LOGGED = 4, BenchmarkEventType_RECOVERED_ERROR = 5, BenchmarkEventType_MIN = BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE, BenchmarkEventType_MAX = BenchmarkEventType_RECOVERED_ERROR }; inline const BenchmarkEventType (&EnumValuesBenchmarkEventType())[6] { static const BenchmarkEventType values[] = { BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE, BenchmarkEventType_START, BenchmarkEventType_END, BenchmarkEventType_ERROR, BenchmarkEventType_LOGGED, 
BenchmarkEventType_RECOVERED_ERROR }; return values; } inline const char * const *EnumNamesBenchmarkEventType() { static const char * const names[7] = { "UNDEFINED_BENCHMARK_EVENT_TYPE", "START", "END", "ERROR", "LOGGED", "RECOVERED_ERROR", nullptr }; return names; } inline const char *EnumNameBenchmarkEventType(BenchmarkEventType e) { if (flatbuffers::IsOutRange(e, BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE, BenchmarkEventType_RECOVERED_ERROR)) return ""; const size_t index = static_cast(e); return EnumNamesBenchmarkEventType()[index]; } enum BenchmarkStage : int32_t { BenchmarkStage_UNKNOWN = 0, BenchmarkStage_INITIALIZATION = 1, BenchmarkStage_INFERENCE = 2, BenchmarkStage_MIN = BenchmarkStage_UNKNOWN, BenchmarkStage_MAX = BenchmarkStage_INFERENCE }; inline const BenchmarkStage (&EnumValuesBenchmarkStage())[3] { static const BenchmarkStage values[] = { BenchmarkStage_UNKNOWN, BenchmarkStage_INITIALIZATION, BenchmarkStage_INFERENCE }; return values; } inline const char * const *EnumNamesBenchmarkStage() { static const char * const names[4] = { "UNKNOWN", "INITIALIZATION", "INFERENCE", nullptr }; return names; } inline const char *EnumNameBenchmarkStage(BenchmarkStage e) { if (flatbuffers::IsOutRange(e, BenchmarkStage_UNKNOWN, BenchmarkStage_INFERENCE)) return ""; const size_t index = static_cast(e); return EnumNamesBenchmarkStage()[index]; } struct ComputeSettingsT : public flatbuffers::NativeTable { typedef ComputeSettings TableType; tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY; std::unique_ptr tflite_settings{}; std::string model_namespace_for_statistics{}; std::string model_identifier_for_statistics{}; std::unique_ptr settings_to_test_locally{}; ComputeSettingsT() = default; ComputeSettingsT(const ComputeSettingsT &o); ComputeSettingsT(ComputeSettingsT&&) FLATBUFFERS_NOEXCEPT = default; ComputeSettingsT &operator=(ComputeSettingsT o) FLATBUFFERS_NOEXCEPT; }; struct ComputeSettings FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { typedef ComputeSettingsT NativeTableType; typedef ComputeSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PREFERENCE = 4, VT_TFLITE_SETTINGS = 6, VT_MODEL_NAMESPACE_FOR_STATISTICS = 8, VT_MODEL_IDENTIFIER_FOR_STATISTICS = 10, VT_SETTINGS_TO_TEST_LOCALLY = 12 }; tflite::ExecutionPreference preference() const { return static_cast(GetField(VT_PREFERENCE, 0)); } const tflite::TFLiteSettings *tflite_settings() const { return GetPointer(VT_TFLITE_SETTINGS); } const flatbuffers::String *model_namespace_for_statistics() const { return GetPointer(VT_MODEL_NAMESPACE_FOR_STATISTICS); } const flatbuffers::String *model_identifier_for_statistics() const { return GetPointer(VT_MODEL_IDENTIFIER_FOR_STATISTICS); } const tflite::MinibenchmarkSettings *settings_to_test_locally() const { return GetPointer(VT_SETTINGS_TO_TEST_LOCALLY); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PREFERENCE, 4) && VerifyOffset(verifier, VT_TFLITE_SETTINGS) && verifier.VerifyTable(tflite_settings()) && VerifyOffset(verifier, VT_MODEL_NAMESPACE_FOR_STATISTICS) && verifier.VerifyString(model_namespace_for_statistics()) && VerifyOffset(verifier, VT_MODEL_IDENTIFIER_FOR_STATISTICS) && verifier.VerifyString(model_identifier_for_statistics()) && VerifyOffset(verifier, VT_SETTINGS_TO_TEST_LOCALLY) && verifier.VerifyTable(settings_to_test_locally()) && verifier.EndTable(); } ComputeSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(ComputeSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct ComputeSettingsBuilder { typedef ComputeSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void 
add_preference(tflite::ExecutionPreference preference) { fbb_.AddElement(ComputeSettings::VT_PREFERENCE, static_cast(preference), 0); } void add_tflite_settings(flatbuffers::Offset tflite_settings) { fbb_.AddOffset(ComputeSettings::VT_TFLITE_SETTINGS, tflite_settings); } void add_model_namespace_for_statistics(flatbuffers::Offset model_namespace_for_statistics) { fbb_.AddOffset(ComputeSettings::VT_MODEL_NAMESPACE_FOR_STATISTICS, model_namespace_for_statistics); } void add_model_identifier_for_statistics(flatbuffers::Offset model_identifier_for_statistics) { fbb_.AddOffset(ComputeSettings::VT_MODEL_IDENTIFIER_FOR_STATISTICS, model_identifier_for_statistics); } void add_settings_to_test_locally(flatbuffers::Offset settings_to_test_locally) { fbb_.AddOffset(ComputeSettings::VT_SETTINGS_TO_TEST_LOCALLY, settings_to_test_locally); } explicit ComputeSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateComputeSettings( flatbuffers::FlatBufferBuilder &_fbb, tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY, flatbuffers::Offset tflite_settings = 0, flatbuffers::Offset model_namespace_for_statistics = 0, flatbuffers::Offset model_identifier_for_statistics = 0, flatbuffers::Offset settings_to_test_locally = 0) { ComputeSettingsBuilder builder_(_fbb); builder_.add_settings_to_test_locally(settings_to_test_locally); builder_.add_model_identifier_for_statistics(model_identifier_for_statistics); builder_.add_model_namespace_for_statistics(model_namespace_for_statistics); builder_.add_tflite_settings(tflite_settings); builder_.add_preference(preference); return builder_.Finish(); } inline flatbuffers::Offset CreateComputeSettingsDirect( flatbuffers::FlatBufferBuilder &_fbb, tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY, 
flatbuffers::Offset tflite_settings = 0, const char *model_namespace_for_statistics = nullptr, const char *model_identifier_for_statistics = nullptr, flatbuffers::Offset settings_to_test_locally = 0) { auto model_namespace_for_statistics__ = model_namespace_for_statistics ? _fbb.CreateString(model_namespace_for_statistics) : 0; auto model_identifier_for_statistics__ = model_identifier_for_statistics ? _fbb.CreateString(model_identifier_for_statistics) : 0; return tflite::CreateComputeSettings( _fbb, preference, tflite_settings, model_namespace_for_statistics__, model_identifier_for_statistics__, settings_to_test_locally); } flatbuffers::Offset CreateComputeSettings(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct NNAPISettingsT : public flatbuffers::NativeTable { typedef NNAPISettings TableType; std::string accelerator_name{}; std::string cache_directory{}; std::string model_token{}; tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED; int32_t no_of_nnapi_instances_to_cache = 0; std::unique_ptr fallback_settings{}; bool allow_nnapi_cpu_on_android_10_plus = false; tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED; bool allow_dynamic_dimensions = false; bool allow_fp16_precision_for_fp32 = false; bool use_burst_computation = false; int64_t support_library_handle = 0; NNAPISettingsT() = default; NNAPISettingsT(const NNAPISettingsT &o); NNAPISettingsT(NNAPISettingsT&&) FLATBUFFERS_NOEXCEPT = default; NNAPISettingsT &operator=(NNAPISettingsT o) FLATBUFFERS_NOEXCEPT; }; struct NNAPISettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef NNAPISettingsT NativeTableType; typedef NNAPISettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_ACCELERATOR_NAME = 4, VT_CACHE_DIRECTORY = 6, VT_MODEL_TOKEN = 8, VT_EXECUTION_PREFERENCE = 10, 
VT_NO_OF_NNAPI_INSTANCES_TO_CACHE = 12, VT_FALLBACK_SETTINGS = 14, VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS = 16, VT_EXECUTION_PRIORITY = 18, VT_ALLOW_DYNAMIC_DIMENSIONS = 20, VT_ALLOW_FP16_PRECISION_FOR_FP32 = 22, VT_USE_BURST_COMPUTATION = 24, VT_SUPPORT_LIBRARY_HANDLE = 26 }; const flatbuffers::String *accelerator_name() const { return GetPointer(VT_ACCELERATOR_NAME); } const flatbuffers::String *cache_directory() const { return GetPointer(VT_CACHE_DIRECTORY); } const flatbuffers::String *model_token() const { return GetPointer(VT_MODEL_TOKEN); } tflite::NNAPIExecutionPreference execution_preference() const { return static_cast(GetField(VT_EXECUTION_PREFERENCE, 0)); } int32_t no_of_nnapi_instances_to_cache() const { return GetField(VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, 0); } const tflite::FallbackSettings *fallback_settings() const { return GetPointer(VT_FALLBACK_SETTINGS); } bool allow_nnapi_cpu_on_android_10_plus() const { return GetField(VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 0) != 0; } tflite::NNAPIExecutionPriority execution_priority() const { return static_cast(GetField(VT_EXECUTION_PRIORITY, 0)); } bool allow_dynamic_dimensions() const { return GetField(VT_ALLOW_DYNAMIC_DIMENSIONS, 0) != 0; } bool allow_fp16_precision_for_fp32() const { return GetField(VT_ALLOW_FP16_PRECISION_FOR_FP32, 0) != 0; } bool use_burst_computation() const { return GetField(VT_USE_BURST_COMPUTATION, 0) != 0; } int64_t support_library_handle() const { return GetField(VT_SUPPORT_LIBRARY_HANDLE, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_ACCELERATOR_NAME) && verifier.VerifyString(accelerator_name()) && VerifyOffset(verifier, VT_CACHE_DIRECTORY) && verifier.VerifyString(cache_directory()) && VerifyOffset(verifier, VT_MODEL_TOKEN) && verifier.VerifyString(model_token()) && VerifyField(verifier, VT_EXECUTION_PREFERENCE, 4) && VerifyField(verifier, VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, 4) && VerifyOffset(verifier, 
VT_FALLBACK_SETTINGS) && verifier.VerifyTable(fallback_settings()) && VerifyField(verifier, VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 1) && VerifyField(verifier, VT_EXECUTION_PRIORITY, 4) && VerifyField(verifier, VT_ALLOW_DYNAMIC_DIMENSIONS, 1) && VerifyField(verifier, VT_ALLOW_FP16_PRECISION_FOR_FP32, 1) && VerifyField(verifier, VT_USE_BURST_COMPUTATION, 1) && VerifyField(verifier, VT_SUPPORT_LIBRARY_HANDLE, 8) && verifier.EndTable(); } NNAPISettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(NNAPISettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct NNAPISettingsBuilder { typedef NNAPISettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_accelerator_name(flatbuffers::Offset accelerator_name) { fbb_.AddOffset(NNAPISettings::VT_ACCELERATOR_NAME, accelerator_name); } void add_cache_directory(flatbuffers::Offset cache_directory) { fbb_.AddOffset(NNAPISettings::VT_CACHE_DIRECTORY, cache_directory); } void add_model_token(flatbuffers::Offset model_token) { fbb_.AddOffset(NNAPISettings::VT_MODEL_TOKEN, model_token); } void add_execution_preference(tflite::NNAPIExecutionPreference execution_preference) { fbb_.AddElement(NNAPISettings::VT_EXECUTION_PREFERENCE, static_cast(execution_preference), 0); } void add_no_of_nnapi_instances_to_cache(int32_t no_of_nnapi_instances_to_cache) { fbb_.AddElement(NNAPISettings::VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, no_of_nnapi_instances_to_cache, 0); } void add_fallback_settings(flatbuffers::Offset fallback_settings) { fbb_.AddOffset(NNAPISettings::VT_FALLBACK_SETTINGS, fallback_settings); } void add_allow_nnapi_cpu_on_android_10_plus(bool allow_nnapi_cpu_on_android_10_plus) { fbb_.AddElement(NNAPISettings::VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 
static_cast(allow_nnapi_cpu_on_android_10_plus), 0); } void add_execution_priority(tflite::NNAPIExecutionPriority execution_priority) { fbb_.AddElement(NNAPISettings::VT_EXECUTION_PRIORITY, static_cast(execution_priority), 0); } void add_allow_dynamic_dimensions(bool allow_dynamic_dimensions) { fbb_.AddElement(NNAPISettings::VT_ALLOW_DYNAMIC_DIMENSIONS, static_cast(allow_dynamic_dimensions), 0); } void add_allow_fp16_precision_for_fp32(bool allow_fp16_precision_for_fp32) { fbb_.AddElement(NNAPISettings::VT_ALLOW_FP16_PRECISION_FOR_FP32, static_cast(allow_fp16_precision_for_fp32), 0); } void add_use_burst_computation(bool use_burst_computation) { fbb_.AddElement(NNAPISettings::VT_USE_BURST_COMPUTATION, static_cast(use_burst_computation), 0); } void add_support_library_handle(int64_t support_library_handle) { fbb_.AddElement(NNAPISettings::VT_SUPPORT_LIBRARY_HANDLE, support_library_handle, 0); } explicit NNAPISettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateNNAPISettings( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset accelerator_name = 0, flatbuffers::Offset cache_directory = 0, flatbuffers::Offset model_token = 0, tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED, int32_t no_of_nnapi_instances_to_cache = 0, flatbuffers::Offset fallback_settings = 0, bool allow_nnapi_cpu_on_android_10_plus = false, tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, bool allow_dynamic_dimensions = false, bool allow_fp16_precision_for_fp32 = false, bool use_burst_computation = false, int64_t support_library_handle = 0) { NNAPISettingsBuilder builder_(_fbb); builder_.add_support_library_handle(support_library_handle); builder_.add_execution_priority(execution_priority); 
builder_.add_fallback_settings(fallback_settings); builder_.add_no_of_nnapi_instances_to_cache(no_of_nnapi_instances_to_cache); builder_.add_execution_preference(execution_preference); builder_.add_model_token(model_token); builder_.add_cache_directory(cache_directory); builder_.add_accelerator_name(accelerator_name); builder_.add_use_burst_computation(use_burst_computation); builder_.add_allow_fp16_precision_for_fp32(allow_fp16_precision_for_fp32); builder_.add_allow_dynamic_dimensions(allow_dynamic_dimensions); builder_.add_allow_nnapi_cpu_on_android_10_plus(allow_nnapi_cpu_on_android_10_plus); return builder_.Finish(); } inline flatbuffers::Offset CreateNNAPISettingsDirect( flatbuffers::FlatBufferBuilder &_fbb, const char *accelerator_name = nullptr, const char *cache_directory = nullptr, const char *model_token = nullptr, tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED, int32_t no_of_nnapi_instances_to_cache = 0, flatbuffers::Offset fallback_settings = 0, bool allow_nnapi_cpu_on_android_10_plus = false, tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED, bool allow_dynamic_dimensions = false, bool allow_fp16_precision_for_fp32 = false, bool use_burst_computation = false, int64_t support_library_handle = 0) { auto accelerator_name__ = accelerator_name ? _fbb.CreateString(accelerator_name) : 0; auto cache_directory__ = cache_directory ? _fbb.CreateString(cache_directory) : 0; auto model_token__ = model_token ? 
_fbb.CreateString(model_token) : 0; return tflite::CreateNNAPISettings( _fbb, accelerator_name__, cache_directory__, model_token__, execution_preference, no_of_nnapi_instances_to_cache, fallback_settings, allow_nnapi_cpu_on_android_10_plus, execution_priority, allow_dynamic_dimensions, allow_fp16_precision_for_fp32, use_burst_computation, support_library_handle); } flatbuffers::Offset CreateNNAPISettings(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct GPUSettingsT : public flatbuffers::NativeTable { typedef GPUSettings TableType; bool is_precision_loss_allowed = false; bool enable_quantized_inference = true; tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET; tflite::GPUInferencePriority inference_priority1 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO; tflite::GPUInferencePriority inference_priority2 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO; tflite::GPUInferencePriority inference_priority3 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO; tflite::GPUInferenceUsage inference_preference = tflite::GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER; std::string cache_directory{}; std::string model_token{}; }; struct GPUSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef GPUSettingsT NativeTableType; typedef GPUSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_IS_PRECISION_LOSS_ALLOWED = 4, VT_ENABLE_QUANTIZED_INFERENCE = 6, VT_FORCE_BACKEND = 8, VT_INFERENCE_PRIORITY1 = 10, VT_INFERENCE_PRIORITY2 = 12, VT_INFERENCE_PRIORITY3 = 14, VT_INFERENCE_PREFERENCE = 16, VT_CACHE_DIRECTORY = 18, VT_MODEL_TOKEN = 20 }; bool is_precision_loss_allowed() const { return GetField(VT_IS_PRECISION_LOSS_ALLOWED, 0) != 0; } bool enable_quantized_inference() const { return GetField(VT_ENABLE_QUANTIZED_INFERENCE, 1) != 0; } tflite::GPUBackend force_backend() const { return static_cast(GetField(VT_FORCE_BACKEND, 
0)); } tflite::GPUInferencePriority inference_priority1() const { return static_cast(GetField(VT_INFERENCE_PRIORITY1, 0)); } tflite::GPUInferencePriority inference_priority2() const { return static_cast(GetField(VT_INFERENCE_PRIORITY2, 0)); } tflite::GPUInferencePriority inference_priority3() const { return static_cast(GetField(VT_INFERENCE_PRIORITY3, 0)); } tflite::GPUInferenceUsage inference_preference() const { return static_cast(GetField(VT_INFERENCE_PREFERENCE, 0)); } const flatbuffers::String *cache_directory() const { return GetPointer(VT_CACHE_DIRECTORY); } const flatbuffers::String *model_token() const { return GetPointer(VT_MODEL_TOKEN); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_IS_PRECISION_LOSS_ALLOWED, 1) && VerifyField(verifier, VT_ENABLE_QUANTIZED_INFERENCE, 1) && VerifyField(verifier, VT_FORCE_BACKEND, 4) && VerifyField(verifier, VT_INFERENCE_PRIORITY1, 4) && VerifyField(verifier, VT_INFERENCE_PRIORITY2, 4) && VerifyField(verifier, VT_INFERENCE_PRIORITY3, 4) && VerifyField(verifier, VT_INFERENCE_PREFERENCE, 4) && VerifyOffset(verifier, VT_CACHE_DIRECTORY) && verifier.VerifyString(cache_directory()) && VerifyOffset(verifier, VT_MODEL_TOKEN) && verifier.VerifyString(model_token()) && verifier.EndTable(); } GPUSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(GPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct GPUSettingsBuilder { typedef GPUSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_is_precision_loss_allowed(bool is_precision_loss_allowed) { fbb_.AddElement(GPUSettings::VT_IS_PRECISION_LOSS_ALLOWED, static_cast(is_precision_loss_allowed), 0); } void 
add_enable_quantized_inference(bool enable_quantized_inference) { fbb_.AddElement(GPUSettings::VT_ENABLE_QUANTIZED_INFERENCE, static_cast(enable_quantized_inference), 1); } void add_force_backend(tflite::GPUBackend force_backend) { fbb_.AddElement(GPUSettings::VT_FORCE_BACKEND, static_cast(force_backend), 0); } void add_inference_priority1(tflite::GPUInferencePriority inference_priority1) { fbb_.AddElement(GPUSettings::VT_INFERENCE_PRIORITY1, static_cast(inference_priority1), 0); } void add_inference_priority2(tflite::GPUInferencePriority inference_priority2) { fbb_.AddElement(GPUSettings::VT_INFERENCE_PRIORITY2, static_cast(inference_priority2), 0); } void add_inference_priority3(tflite::GPUInferencePriority inference_priority3) { fbb_.AddElement(GPUSettings::VT_INFERENCE_PRIORITY3, static_cast(inference_priority3), 0); } void add_inference_preference(tflite::GPUInferenceUsage inference_preference) { fbb_.AddElement(GPUSettings::VT_INFERENCE_PREFERENCE, static_cast(inference_preference), 0); } void add_cache_directory(flatbuffers::Offset cache_directory) { fbb_.AddOffset(GPUSettings::VT_CACHE_DIRECTORY, cache_directory); } void add_model_token(flatbuffers::Offset model_token) { fbb_.AddOffset(GPUSettings::VT_MODEL_TOKEN, model_token); } explicit GPUSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateGPUSettings( flatbuffers::FlatBufferBuilder &_fbb, bool is_precision_loss_allowed = false, bool enable_quantized_inference = true, tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET, tflite::GPUInferencePriority inference_priority1 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO, tflite::GPUInferencePriority inference_priority2 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO, tflite::GPUInferencePriority inference_priority3 = 
tflite::GPUInferencePriority_GPU_PRIORITY_AUTO, tflite::GPUInferenceUsage inference_preference = tflite::GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER, flatbuffers::Offset cache_directory = 0, flatbuffers::Offset model_token = 0) { GPUSettingsBuilder builder_(_fbb); builder_.add_model_token(model_token); builder_.add_cache_directory(cache_directory); builder_.add_inference_preference(inference_preference); builder_.add_inference_priority3(inference_priority3); builder_.add_inference_priority2(inference_priority2); builder_.add_inference_priority1(inference_priority1); builder_.add_force_backend(force_backend); builder_.add_enable_quantized_inference(enable_quantized_inference); builder_.add_is_precision_loss_allowed(is_precision_loss_allowed); return builder_.Finish(); } inline flatbuffers::Offset CreateGPUSettingsDirect( flatbuffers::FlatBufferBuilder &_fbb, bool is_precision_loss_allowed = false, bool enable_quantized_inference = true, tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET, tflite::GPUInferencePriority inference_priority1 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO, tflite::GPUInferencePriority inference_priority2 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO, tflite::GPUInferencePriority inference_priority3 = tflite::GPUInferencePriority_GPU_PRIORITY_AUTO, tflite::GPUInferenceUsage inference_preference = tflite::GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER, const char *cache_directory = nullptr, const char *model_token = nullptr) { auto cache_directory__ = cache_directory ? _fbb.CreateString(cache_directory) : 0; auto model_token__ = model_token ? 
_fbb.CreateString(model_token) : 0; return tflite::CreateGPUSettings( _fbb, is_precision_loss_allowed, enable_quantized_inference, force_backend, inference_priority1, inference_priority2, inference_priority3, inference_preference, cache_directory__, model_token__); } flatbuffers::Offset CreateGPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct HexagonSettingsT : public flatbuffers::NativeTable { typedef HexagonSettings TableType; int32_t debug_level = 0; int32_t powersave_level = 0; bool print_graph_profile = false; bool print_graph_debug = false; }; struct HexagonSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef HexagonSettingsT NativeTableType; typedef HexagonSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_DEBUG_LEVEL = 4, VT_POWERSAVE_LEVEL = 6, VT_PRINT_GRAPH_PROFILE = 8, VT_PRINT_GRAPH_DEBUG = 10 }; int32_t debug_level() const { return GetField(VT_DEBUG_LEVEL, 0); } int32_t powersave_level() const { return GetField(VT_POWERSAVE_LEVEL, 0); } bool print_graph_profile() const { return GetField(VT_PRINT_GRAPH_PROFILE, 0) != 0; } bool print_graph_debug() const { return GetField(VT_PRINT_GRAPH_DEBUG, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_DEBUG_LEVEL, 4) && VerifyField(verifier, VT_POWERSAVE_LEVEL, 4) && VerifyField(verifier, VT_PRINT_GRAPH_PROFILE, 1) && VerifyField(verifier, VT_PRINT_GRAPH_DEBUG, 1) && verifier.EndTable(); } HexagonSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(HexagonSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct HexagonSettingsBuilder { typedef 
HexagonSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_debug_level(int32_t debug_level) { fbb_.AddElement(HexagonSettings::VT_DEBUG_LEVEL, debug_level, 0); } void add_powersave_level(int32_t powersave_level) { fbb_.AddElement(HexagonSettings::VT_POWERSAVE_LEVEL, powersave_level, 0); } void add_print_graph_profile(bool print_graph_profile) { fbb_.AddElement(HexagonSettings::VT_PRINT_GRAPH_PROFILE, static_cast(print_graph_profile), 0); } void add_print_graph_debug(bool print_graph_debug) { fbb_.AddElement(HexagonSettings::VT_PRINT_GRAPH_DEBUG, static_cast(print_graph_debug), 0); } explicit HexagonSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateHexagonSettings( flatbuffers::FlatBufferBuilder &_fbb, int32_t debug_level = 0, int32_t powersave_level = 0, bool print_graph_profile = false, bool print_graph_debug = false) { HexagonSettingsBuilder builder_(_fbb); builder_.add_powersave_level(powersave_level); builder_.add_debug_level(debug_level); builder_.add_print_graph_debug(print_graph_debug); builder_.add_print_graph_profile(print_graph_profile); return builder_.Finish(); } flatbuffers::Offset CreateHexagonSettings(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct XNNPackSettingsT : public flatbuffers::NativeTable { typedef XNNPackSettings TableType; int32_t num_threads = 0; tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS; }; struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef XNNPackSettingsT NativeTableType; typedef XNNPackSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUM_THREADS = 4, VT_FLAGS = 6 }; int32_t num_threads() 
const { return GetField(VT_NUM_THREADS, 0); } tflite::XNNPackFlags flags() const { return static_cast(GetField(VT_FLAGS, 0)); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_THREADS, 4) && VerifyField(verifier, VT_FLAGS, 4) && verifier.EndTable(); } XNNPackSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(XNNPackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct XNNPackSettingsBuilder { typedef XNNPackSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_num_threads(int32_t num_threads) { fbb_.AddElement(XNNPackSettings::VT_NUM_THREADS, num_threads, 0); } void add_flags(tflite::XNNPackFlags flags) { fbb_.AddElement(XNNPackSettings::VT_FLAGS, static_cast(flags), 0); } explicit XNNPackSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateXNNPackSettings( flatbuffers::FlatBufferBuilder &_fbb, int32_t num_threads = 0, tflite::XNNPackFlags flags = tflite::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_NO_FLAGS) { XNNPackSettingsBuilder builder_(_fbb); builder_.add_flags(flags); builder_.add_num_threads(num_threads); return builder_.Finish(); } flatbuffers::Offset CreateXNNPackSettings(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct CoreMLSettingsT : public flatbuffers::NativeTable { typedef CoreMLSettings TableType; tflite::CoreMLSettings_::EnabledDevices enabled_devices = tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL; int32_t 
coreml_version = 0; int32_t max_delegated_partitions = 0; int32_t min_nodes_per_partition = 2; }; struct CoreMLSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef CoreMLSettingsT NativeTableType; typedef CoreMLSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_ENABLED_DEVICES = 4, VT_COREML_VERSION = 6, VT_MAX_DELEGATED_PARTITIONS = 8, VT_MIN_NODES_PER_PARTITION = 10 }; tflite::CoreMLSettings_::EnabledDevices enabled_devices() const { return static_cast(GetField(VT_ENABLED_DEVICES, 0)); } int32_t coreml_version() const { return GetField(VT_COREML_VERSION, 0); } int32_t max_delegated_partitions() const { return GetField(VT_MAX_DELEGATED_PARTITIONS, 0); } int32_t min_nodes_per_partition() const { return GetField(VT_MIN_NODES_PER_PARTITION, 2); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ENABLED_DEVICES, 4) && VerifyField(verifier, VT_COREML_VERSION, 4) && VerifyField(verifier, VT_MAX_DELEGATED_PARTITIONS, 4) && VerifyField(verifier, VT_MIN_NODES_PER_PARTITION, 4) && verifier.EndTable(); } CoreMLSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(CoreMLSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct CoreMLSettingsBuilder { typedef CoreMLSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_enabled_devices(tflite::CoreMLSettings_::EnabledDevices enabled_devices) { fbb_.AddElement(CoreMLSettings::VT_ENABLED_DEVICES, static_cast(enabled_devices), 0); } void add_coreml_version(int32_t coreml_version) { fbb_.AddElement(CoreMLSettings::VT_COREML_VERSION, coreml_version, 0); } void add_max_delegated_partitions(int32_t max_delegated_partitions) { 
fbb_.AddElement(CoreMLSettings::VT_MAX_DELEGATED_PARTITIONS, max_delegated_partitions, 0); } void add_min_nodes_per_partition(int32_t min_nodes_per_partition) { fbb_.AddElement(CoreMLSettings::VT_MIN_NODES_PER_PARTITION, min_nodes_per_partition, 2); } explicit CoreMLSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateCoreMLSettings( flatbuffers::FlatBufferBuilder &_fbb, tflite::CoreMLSettings_::EnabledDevices enabled_devices = tflite::CoreMLSettings_::EnabledDevices_DEVICES_ALL, int32_t coreml_version = 0, int32_t max_delegated_partitions = 0, int32_t min_nodes_per_partition = 2) { CoreMLSettingsBuilder builder_(_fbb); builder_.add_min_nodes_per_partition(min_nodes_per_partition); builder_.add_max_delegated_partitions(max_delegated_partitions); builder_.add_coreml_version(coreml_version); builder_.add_enabled_devices(enabled_devices); return builder_.Finish(); } flatbuffers::Offset CreateCoreMLSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct EdgeTpuDeviceSpecT : public flatbuffers::NativeTable { typedef EdgeTpuDeviceSpec TableType; tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO; int32_t num_chips = 0; std::vector device_paths{}; int32_t chip_family = 0; }; struct EdgeTpuDeviceSpec FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef EdgeTpuDeviceSpecT NativeTableType; typedef EdgeTpuDeviceSpecBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_PLATFORM_TYPE = 4, VT_NUM_CHIPS = 6, VT_DEVICE_PATHS = 8, VT_CHIP_FAMILY = 10 }; tflite::EdgeTpuDeviceSpec_::PlatformType platform_type() const { return static_cast(GetField(VT_PLATFORM_TYPE, 0)); } int32_t num_chips() const { return 
GetField(VT_NUM_CHIPS, 0); } const flatbuffers::Vector> *device_paths() const { return GetPointer> *>(VT_DEVICE_PATHS); } int32_t chip_family() const { return GetField(VT_CHIP_FAMILY, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_PLATFORM_TYPE, 4) && VerifyField(verifier, VT_NUM_CHIPS, 4) && VerifyOffset(verifier, VT_DEVICE_PATHS) && verifier.VerifyVector(device_paths()) && verifier.VerifyVectorOfStrings(device_paths()) && VerifyField(verifier, VT_CHIP_FAMILY, 4) && verifier.EndTable(); } EdgeTpuDeviceSpecT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(EdgeTpuDeviceSpecT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct EdgeTpuDeviceSpecBuilder { typedef EdgeTpuDeviceSpec Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_platform_type(tflite::EdgeTpuDeviceSpec_::PlatformType platform_type) { fbb_.AddElement(EdgeTpuDeviceSpec::VT_PLATFORM_TYPE, static_cast(platform_type), 0); } void add_num_chips(int32_t num_chips) { fbb_.AddElement(EdgeTpuDeviceSpec::VT_NUM_CHIPS, num_chips, 0); } void add_device_paths(flatbuffers::Offset>> device_paths) { fbb_.AddOffset(EdgeTpuDeviceSpec::VT_DEVICE_PATHS, device_paths); } void add_chip_family(int32_t chip_family) { fbb_.AddElement(EdgeTpuDeviceSpec::VT_CHIP_FAMILY, chip_family, 0); } explicit EdgeTpuDeviceSpecBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateEdgeTpuDeviceSpec( flatbuffers::FlatBufferBuilder &_fbb, tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = 
tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO, int32_t num_chips = 0, flatbuffers::Offset>> device_paths = 0, int32_t chip_family = 0) { EdgeTpuDeviceSpecBuilder builder_(_fbb); builder_.add_chip_family(chip_family); builder_.add_device_paths(device_paths); builder_.add_num_chips(num_chips); builder_.add_platform_type(platform_type); return builder_.Finish(); } inline flatbuffers::Offset CreateEdgeTpuDeviceSpecDirect( flatbuffers::FlatBufferBuilder &_fbb, tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO, int32_t num_chips = 0, const std::vector> *device_paths = nullptr, int32_t chip_family = 0) { auto device_paths__ = device_paths ? _fbb.CreateVector>(*device_paths) : 0; return tflite::CreateEdgeTpuDeviceSpec( _fbb, platform_type, num_chips, device_paths__, chip_family); } flatbuffers::Offset CreateEdgeTpuDeviceSpec(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct EdgeTpuInactivePowerConfigT : public flatbuffers::NativeTable { typedef EdgeTpuInactivePowerConfig TableType; tflite::EdgeTpuPowerState inactive_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE; int64_t inactive_timeout_us = 0; }; struct EdgeTpuInactivePowerConfig FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef EdgeTpuInactivePowerConfigT NativeTableType; typedef EdgeTpuInactivePowerConfigBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INACTIVE_POWER_STATE = 4, VT_INACTIVE_TIMEOUT_US = 6 }; tflite::EdgeTpuPowerState inactive_power_state() const { return static_cast(GetField(VT_INACTIVE_POWER_STATE, 0)); } int64_t inactive_timeout_us() const { return GetField(VT_INACTIVE_TIMEOUT_US, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_INACTIVE_POWER_STATE, 4) && VerifyField(verifier, VT_INACTIVE_TIMEOUT_US, 8) && verifier.EndTable(); } 
EdgeTpuInactivePowerConfigT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(EdgeTpuInactivePowerConfigT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct EdgeTpuInactivePowerConfigBuilder { typedef EdgeTpuInactivePowerConfig Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_inactive_power_state(tflite::EdgeTpuPowerState inactive_power_state) { fbb_.AddElement(EdgeTpuInactivePowerConfig::VT_INACTIVE_POWER_STATE, static_cast(inactive_power_state), 0); } void add_inactive_timeout_us(int64_t inactive_timeout_us) { fbb_.AddElement(EdgeTpuInactivePowerConfig::VT_INACTIVE_TIMEOUT_US, inactive_timeout_us, 0); } explicit EdgeTpuInactivePowerConfigBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateEdgeTpuInactivePowerConfig( flatbuffers::FlatBufferBuilder &_fbb, tflite::EdgeTpuPowerState inactive_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE, int64_t inactive_timeout_us = 0) { EdgeTpuInactivePowerConfigBuilder builder_(_fbb); builder_.add_inactive_timeout_us(inactive_timeout_us); builder_.add_inactive_power_state(inactive_power_state); return builder_.Finish(); } flatbuffers::Offset CreateEdgeTpuInactivePowerConfig(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct EdgeTpuSettingsT : public flatbuffers::NativeTable { typedef EdgeTpuSettings TableType; tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE; std::vector> inactive_power_configs{}; int32_t 
inference_priority = -1; std::unique_ptr edgetpu_device_spec{}; std::string model_token{}; tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type = tflite::EdgeTpuSettings_::FloatTruncationType_UNSPECIFIED; tflite::EdgeTpuSettings_::QosClass qos_class = tflite::EdgeTpuSettings_::QosClass_QOS_UNDEFINED; EdgeTpuSettingsT() = default; EdgeTpuSettingsT(const EdgeTpuSettingsT &o); EdgeTpuSettingsT(EdgeTpuSettingsT&&) FLATBUFFERS_NOEXCEPT = default; EdgeTpuSettingsT &operator=(EdgeTpuSettingsT o) FLATBUFFERS_NOEXCEPT; }; struct EdgeTpuSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef EdgeTpuSettingsT NativeTableType; typedef EdgeTpuSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INFERENCE_POWER_STATE = 4, VT_INACTIVE_POWER_CONFIGS = 6, VT_INFERENCE_PRIORITY = 8, VT_EDGETPU_DEVICE_SPEC = 10, VT_MODEL_TOKEN = 12, VT_FLOAT_TRUNCATION_TYPE = 14, VT_QOS_CLASS = 16 }; tflite::EdgeTpuPowerState inference_power_state() const { return static_cast(GetField(VT_INFERENCE_POWER_STATE, 0)); } const flatbuffers::Vector> *inactive_power_configs() const { return GetPointer> *>(VT_INACTIVE_POWER_CONFIGS); } int32_t inference_priority() const { return GetField(VT_INFERENCE_PRIORITY, -1); } const tflite::EdgeTpuDeviceSpec *edgetpu_device_spec() const { return GetPointer(VT_EDGETPU_DEVICE_SPEC); } const flatbuffers::String *model_token() const { return GetPointer(VT_MODEL_TOKEN); } tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type() const { return static_cast(GetField(VT_FLOAT_TRUNCATION_TYPE, 0)); } tflite::EdgeTpuSettings_::QosClass qos_class() const { return static_cast(GetField(VT_QOS_CLASS, 0)); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_INFERENCE_POWER_STATE, 4) && VerifyOffset(verifier, VT_INACTIVE_POWER_CONFIGS) && verifier.VerifyVector(inactive_power_configs()) && 
verifier.VerifyVectorOfTables(inactive_power_configs()) && VerifyField(verifier, VT_INFERENCE_PRIORITY, 4) && VerifyOffset(verifier, VT_EDGETPU_DEVICE_SPEC) && verifier.VerifyTable(edgetpu_device_spec()) && VerifyOffset(verifier, VT_MODEL_TOKEN) && verifier.VerifyString(model_token()) && VerifyField(verifier, VT_FLOAT_TRUNCATION_TYPE, 4) && VerifyField(verifier, VT_QOS_CLASS, 4) && verifier.EndTable(); } EdgeTpuSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(EdgeTpuSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct EdgeTpuSettingsBuilder { typedef EdgeTpuSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_inference_power_state(tflite::EdgeTpuPowerState inference_power_state) { fbb_.AddElement(EdgeTpuSettings::VT_INFERENCE_POWER_STATE, static_cast(inference_power_state), 0); } void add_inactive_power_configs(flatbuffers::Offset>> inactive_power_configs) { fbb_.AddOffset(EdgeTpuSettings::VT_INACTIVE_POWER_CONFIGS, inactive_power_configs); } void add_inference_priority(int32_t inference_priority) { fbb_.AddElement(EdgeTpuSettings::VT_INFERENCE_PRIORITY, inference_priority, -1); } void add_edgetpu_device_spec(flatbuffers::Offset edgetpu_device_spec) { fbb_.AddOffset(EdgeTpuSettings::VT_EDGETPU_DEVICE_SPEC, edgetpu_device_spec); } void add_model_token(flatbuffers::Offset model_token) { fbb_.AddOffset(EdgeTpuSettings::VT_MODEL_TOKEN, model_token); } void add_float_truncation_type(tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type) { fbb_.AddElement(EdgeTpuSettings::VT_FLOAT_TRUNCATION_TYPE, static_cast(float_truncation_type), 0); } void add_qos_class(tflite::EdgeTpuSettings_::QosClass qos_class) { fbb_.AddElement(EdgeTpuSettings::VT_QOS_CLASS, 
static_cast(qos_class), 0); } explicit EdgeTpuSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateEdgeTpuSettings( flatbuffers::FlatBufferBuilder &_fbb, tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE, flatbuffers::Offset>> inactive_power_configs = 0, int32_t inference_priority = -1, flatbuffers::Offset edgetpu_device_spec = 0, flatbuffers::Offset model_token = 0, tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type = tflite::EdgeTpuSettings_::FloatTruncationType_UNSPECIFIED, tflite::EdgeTpuSettings_::QosClass qos_class = tflite::EdgeTpuSettings_::QosClass_QOS_UNDEFINED) { EdgeTpuSettingsBuilder builder_(_fbb); builder_.add_qos_class(qos_class); builder_.add_float_truncation_type(float_truncation_type); builder_.add_model_token(model_token); builder_.add_edgetpu_device_spec(edgetpu_device_spec); builder_.add_inference_priority(inference_priority); builder_.add_inactive_power_configs(inactive_power_configs); builder_.add_inference_power_state(inference_power_state); return builder_.Finish(); } inline flatbuffers::Offset CreateEdgeTpuSettingsDirect( flatbuffers::FlatBufferBuilder &_fbb, tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE, const std::vector> *inactive_power_configs = nullptr, int32_t inference_priority = -1, flatbuffers::Offset edgetpu_device_spec = 0, const char *model_token = nullptr, tflite::EdgeTpuSettings_::FloatTruncationType float_truncation_type = tflite::EdgeTpuSettings_::FloatTruncationType_UNSPECIFIED, tflite::EdgeTpuSettings_::QosClass qos_class = tflite::EdgeTpuSettings_::QosClass_QOS_UNDEFINED) { auto inactive_power_configs__ = inactive_power_configs ? 
_fbb.CreateVector>(*inactive_power_configs) : 0; auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0; return tflite::CreateEdgeTpuSettings( _fbb, inference_power_state, inactive_power_configs__, inference_priority, edgetpu_device_spec, model_token__, float_truncation_type, qos_class); } flatbuffers::Offset CreateEdgeTpuSettings(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct CoralSettingsT : public flatbuffers::NativeTable { typedef CoralSettings TableType; std::string device{}; tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED; bool usb_always_dfu = false; int32_t usb_max_bulk_in_queue_length = 0; }; struct CoralSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef CoralSettingsT NativeTableType; typedef CoralSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_DEVICE = 4, VT_PERFORMANCE = 6, VT_USB_ALWAYS_DFU = 8, VT_USB_MAX_BULK_IN_QUEUE_LENGTH = 10 }; const flatbuffers::String *device() const { return GetPointer(VT_DEVICE); } tflite::CoralSettings_::Performance performance() const { return static_cast(GetField(VT_PERFORMANCE, 0)); } bool usb_always_dfu() const { return GetField(VT_USB_ALWAYS_DFU, 0) != 0; } int32_t usb_max_bulk_in_queue_length() const { return GetField(VT_USB_MAX_BULK_IN_QUEUE_LENGTH, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_DEVICE) && verifier.VerifyString(device()) && VerifyField(verifier, VT_PERFORMANCE, 4) && VerifyField(verifier, VT_USB_ALWAYS_DFU, 1) && VerifyField(verifier, VT_USB_MAX_BULK_IN_QUEUE_LENGTH, 4) && verifier.EndTable(); } CoralSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(CoralSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct CoralSettingsBuilder { typedef CoralSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_device(flatbuffers::Offset device) { fbb_.AddOffset(CoralSettings::VT_DEVICE, device); } void add_performance(tflite::CoralSettings_::Performance performance) { fbb_.AddElement(CoralSettings::VT_PERFORMANCE, static_cast(performance), 0); } void add_usb_always_dfu(bool usb_always_dfu) { fbb_.AddElement(CoralSettings::VT_USB_ALWAYS_DFU, static_cast(usb_always_dfu), 0); } void add_usb_max_bulk_in_queue_length(int32_t usb_max_bulk_in_queue_length) { fbb_.AddElement(CoralSettings::VT_USB_MAX_BULK_IN_QUEUE_LENGTH, usb_max_bulk_in_queue_length, 0); } explicit CoralSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateCoralSettings( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset device = 0, tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED, bool usb_always_dfu = false, int32_t usb_max_bulk_in_queue_length = 0) { CoralSettingsBuilder builder_(_fbb); builder_.add_usb_max_bulk_in_queue_length(usb_max_bulk_in_queue_length); builder_.add_performance(performance); builder_.add_device(device); builder_.add_usb_always_dfu(usb_always_dfu); return builder_.Finish(); } inline flatbuffers::Offset CreateCoralSettingsDirect( flatbuffers::FlatBufferBuilder &_fbb, const char *device = nullptr, tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED, bool usb_always_dfu = false, int32_t usb_max_bulk_in_queue_length = 0) { auto device__ = device ? 
_fbb.CreateString(device) : 0; return tflite::CreateCoralSettings( _fbb, device__, performance, usb_always_dfu, usb_max_bulk_in_queue_length); } flatbuffers::Offset CreateCoralSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct CPUSettingsT : public flatbuffers::NativeTable { typedef CPUSettings TableType; int32_t num_threads = -1; }; struct CPUSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef CPUSettingsT NativeTableType; typedef CPUSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUM_THREADS = 4 }; int32_t num_threads() const { return GetField(VT_NUM_THREADS, -1); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUM_THREADS, 4) && verifier.EndTable(); } CPUSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(CPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct CPUSettingsBuilder { typedef CPUSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_num_threads(int32_t num_threads) { fbb_.AddElement(CPUSettings::VT_NUM_THREADS, num_threads, -1); } explicit CPUSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateCPUSettings( flatbuffers::FlatBufferBuilder &_fbb, int32_t num_threads = -1) { CPUSettingsBuilder builder_(_fbb); builder_.add_num_threads(num_threads); return builder_.Finish(); } flatbuffers::Offset CreateCPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const 
CPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct TFLiteSettingsT : public flatbuffers::NativeTable { typedef TFLiteSettings TableType; tflite::Delegate delegate = tflite::Delegate_NONE; std::unique_ptr nnapi_settings{}; std::unique_ptr gpu_settings{}; std::unique_ptr hexagon_settings{}; std::unique_ptr xnnpack_settings{}; std::unique_ptr coreml_settings{}; std::unique_ptr cpu_settings{}; int32_t max_delegated_partitions = 0; std::unique_ptr edgetpu_settings{}; std::unique_ptr coral_settings{}; std::unique_ptr fallback_settings{}; bool disable_default_delegates = false; TFLiteSettingsT() = default; TFLiteSettingsT(const TFLiteSettingsT &o); TFLiteSettingsT(TFLiteSettingsT&&) FLATBUFFERS_NOEXCEPT = default; TFLiteSettingsT &operator=(TFLiteSettingsT o) FLATBUFFERS_NOEXCEPT; }; struct TFLiteSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef TFLiteSettingsT NativeTableType; typedef TFLiteSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_DELEGATE = 4, VT_NNAPI_SETTINGS = 6, VT_GPU_SETTINGS = 8, VT_HEXAGON_SETTINGS = 10, VT_XNNPACK_SETTINGS = 12, VT_COREML_SETTINGS = 14, VT_CPU_SETTINGS = 16, VT_MAX_DELEGATED_PARTITIONS = 18, VT_EDGETPU_SETTINGS = 20, VT_CORAL_SETTINGS = 22, VT_FALLBACK_SETTINGS = 24, VT_DISABLE_DEFAULT_DELEGATES = 26 }; tflite::Delegate delegate() const { return static_cast(GetField(VT_DELEGATE, 0)); } const tflite::NNAPISettings *nnapi_settings() const { return GetPointer(VT_NNAPI_SETTINGS); } const tflite::GPUSettings *gpu_settings() const { return GetPointer(VT_GPU_SETTINGS); } const tflite::HexagonSettings *hexagon_settings() const { return GetPointer(VT_HEXAGON_SETTINGS); } const tflite::XNNPackSettings *xnnpack_settings() const { return GetPointer(VT_XNNPACK_SETTINGS); } const tflite::CoreMLSettings *coreml_settings() const { return GetPointer(VT_COREML_SETTINGS); } const tflite::CPUSettings *cpu_settings() const { return 
GetPointer(VT_CPU_SETTINGS); } int32_t max_delegated_partitions() const { return GetField(VT_MAX_DELEGATED_PARTITIONS, 0); } const tflite::EdgeTpuSettings *edgetpu_settings() const { return GetPointer(VT_EDGETPU_SETTINGS); } const tflite::CoralSettings *coral_settings() const { return GetPointer(VT_CORAL_SETTINGS); } const tflite::FallbackSettings *fallback_settings() const { return GetPointer(VT_FALLBACK_SETTINGS); } bool disable_default_delegates() const { return GetField(VT_DISABLE_DEFAULT_DELEGATES, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_DELEGATE, 4) && VerifyOffset(verifier, VT_NNAPI_SETTINGS) && verifier.VerifyTable(nnapi_settings()) && VerifyOffset(verifier, VT_GPU_SETTINGS) && verifier.VerifyTable(gpu_settings()) && VerifyOffset(verifier, VT_HEXAGON_SETTINGS) && verifier.VerifyTable(hexagon_settings()) && VerifyOffset(verifier, VT_XNNPACK_SETTINGS) && verifier.VerifyTable(xnnpack_settings()) && VerifyOffset(verifier, VT_COREML_SETTINGS) && verifier.VerifyTable(coreml_settings()) && VerifyOffset(verifier, VT_CPU_SETTINGS) && verifier.VerifyTable(cpu_settings()) && VerifyField(verifier, VT_MAX_DELEGATED_PARTITIONS, 4) && VerifyOffset(verifier, VT_EDGETPU_SETTINGS) && verifier.VerifyTable(edgetpu_settings()) && VerifyOffset(verifier, VT_CORAL_SETTINGS) && verifier.VerifyTable(coral_settings()) && VerifyOffset(verifier, VT_FALLBACK_SETTINGS) && verifier.VerifyTable(fallback_settings()) && VerifyField(verifier, VT_DISABLE_DEFAULT_DELEGATES, 1) && verifier.EndTable(); } TFLiteSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(TFLiteSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct TFLiteSettingsBuilder { typedef TFLiteSettings 
Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_delegate(tflite::Delegate delegate) { fbb_.AddElement(TFLiteSettings::VT_DELEGATE, static_cast(delegate), 0); } void add_nnapi_settings(flatbuffers::Offset nnapi_settings) { fbb_.AddOffset(TFLiteSettings::VT_NNAPI_SETTINGS, nnapi_settings); } void add_gpu_settings(flatbuffers::Offset gpu_settings) { fbb_.AddOffset(TFLiteSettings::VT_GPU_SETTINGS, gpu_settings); } void add_hexagon_settings(flatbuffers::Offset hexagon_settings) { fbb_.AddOffset(TFLiteSettings::VT_HEXAGON_SETTINGS, hexagon_settings); } void add_xnnpack_settings(flatbuffers::Offset xnnpack_settings) { fbb_.AddOffset(TFLiteSettings::VT_XNNPACK_SETTINGS, xnnpack_settings); } void add_coreml_settings(flatbuffers::Offset coreml_settings) { fbb_.AddOffset(TFLiteSettings::VT_COREML_SETTINGS, coreml_settings); } void add_cpu_settings(flatbuffers::Offset cpu_settings) { fbb_.AddOffset(TFLiteSettings::VT_CPU_SETTINGS, cpu_settings); } void add_max_delegated_partitions(int32_t max_delegated_partitions) { fbb_.AddElement(TFLiteSettings::VT_MAX_DELEGATED_PARTITIONS, max_delegated_partitions, 0); } void add_edgetpu_settings(flatbuffers::Offset edgetpu_settings) { fbb_.AddOffset(TFLiteSettings::VT_EDGETPU_SETTINGS, edgetpu_settings); } void add_coral_settings(flatbuffers::Offset coral_settings) { fbb_.AddOffset(TFLiteSettings::VT_CORAL_SETTINGS, coral_settings); } void add_fallback_settings(flatbuffers::Offset fallback_settings) { fbb_.AddOffset(TFLiteSettings::VT_FALLBACK_SETTINGS, fallback_settings); } void add_disable_default_delegates(bool disable_default_delegates) { fbb_.AddElement(TFLiteSettings::VT_DISABLE_DEFAULT_DELEGATES, static_cast(disable_default_delegates), 0); } explicit TFLiteSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline 
flatbuffers::Offset CreateTFLiteSettings( flatbuffers::FlatBufferBuilder &_fbb, tflite::Delegate delegate = tflite::Delegate_NONE, flatbuffers::Offset nnapi_settings = 0, flatbuffers::Offset gpu_settings = 0, flatbuffers::Offset hexagon_settings = 0, flatbuffers::Offset xnnpack_settings = 0, flatbuffers::Offset coreml_settings = 0, flatbuffers::Offset cpu_settings = 0, int32_t max_delegated_partitions = 0, flatbuffers::Offset edgetpu_settings = 0, flatbuffers::Offset coral_settings = 0, flatbuffers::Offset fallback_settings = 0, bool disable_default_delegates = false) { TFLiteSettingsBuilder builder_(_fbb); builder_.add_fallback_settings(fallback_settings); builder_.add_coral_settings(coral_settings); builder_.add_edgetpu_settings(edgetpu_settings); builder_.add_max_delegated_partitions(max_delegated_partitions); builder_.add_cpu_settings(cpu_settings); builder_.add_coreml_settings(coreml_settings); builder_.add_xnnpack_settings(xnnpack_settings); builder_.add_hexagon_settings(hexagon_settings); builder_.add_gpu_settings(gpu_settings); builder_.add_nnapi_settings(nnapi_settings); builder_.add_delegate(delegate); builder_.add_disable_default_delegates(disable_default_delegates); return builder_.Finish(); } flatbuffers::Offset CreateTFLiteSettings(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct FallbackSettingsT : public flatbuffers::NativeTable { typedef FallbackSettings TableType; bool allow_automatic_fallback_on_compilation_error = false; bool allow_automatic_fallback_on_execution_error = false; }; struct FallbackSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef FallbackSettingsT NativeTableType; typedef FallbackSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR = 4, VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR = 6 }; bool 
allow_automatic_fallback_on_compilation_error() const { return GetField(VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, 0) != 0; } bool allow_automatic_fallback_on_execution_error() const { return GetField(VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, 0) != 0; } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, 1) && VerifyField(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, 1) && verifier.EndTable(); } FallbackSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(FallbackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct FallbackSettingsBuilder { typedef FallbackSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_allow_automatic_fallback_on_compilation_error(bool allow_automatic_fallback_on_compilation_error) { fbb_.AddElement(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, static_cast(allow_automatic_fallback_on_compilation_error), 0); } void add_allow_automatic_fallback_on_execution_error(bool allow_automatic_fallback_on_execution_error) { fbb_.AddElement(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, static_cast(allow_automatic_fallback_on_execution_error), 0); } explicit FallbackSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateFallbackSettings( flatbuffers::FlatBufferBuilder &_fbb, bool allow_automatic_fallback_on_compilation_error = false, bool allow_automatic_fallback_on_execution_error = false) { 
FallbackSettingsBuilder builder_(_fbb); builder_.add_allow_automatic_fallback_on_execution_error(allow_automatic_fallback_on_execution_error); builder_.add_allow_automatic_fallback_on_compilation_error(allow_automatic_fallback_on_compilation_error); return builder_.Finish(); } flatbuffers::Offset CreateFallbackSettings(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BenchmarkMetricT : public flatbuffers::NativeTable { typedef BenchmarkMetric TableType; std::string name{}; std::vector values{}; }; struct BenchmarkMetric FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BenchmarkMetricT NativeTableType; typedef BenchmarkMetricBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NAME = 4, VT_VALUES = 6 }; const flatbuffers::String *name() const { return GetPointer(VT_NAME); } const flatbuffers::Vector *values() const { return GetPointer *>(VT_VALUES); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_NAME) && verifier.VerifyString(name()) && VerifyOffset(verifier, VT_VALUES) && verifier.VerifyVector(values()) && verifier.EndTable(); } BenchmarkMetricT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BenchmarkMetricT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BenchmarkMetricBuilder { typedef BenchmarkMetric Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_name(flatbuffers::Offset name) { fbb_.AddOffset(BenchmarkMetric::VT_NAME, name); } void add_values(flatbuffers::Offset> values) { fbb_.AddOffset(BenchmarkMetric::VT_VALUES, values); } explicit BenchmarkMetricBuilder(flatbuffers::FlatBufferBuilder 
&_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBenchmarkMetric( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset name = 0, flatbuffers::Offset> values = 0) { BenchmarkMetricBuilder builder_(_fbb); builder_.add_values(values); builder_.add_name(name); return builder_.Finish(); } inline flatbuffers::Offset CreateBenchmarkMetricDirect( flatbuffers::FlatBufferBuilder &_fbb, const char *name = nullptr, const std::vector *values = nullptr) { auto name__ = name ? _fbb.CreateString(name) : 0; auto values__ = values ? _fbb.CreateVector(*values) : 0; return tflite::CreateBenchmarkMetric( _fbb, name__, values__); } flatbuffers::Offset CreateBenchmarkMetric(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BenchmarkResultT : public flatbuffers::NativeTable { typedef BenchmarkResult TableType; std::vector initialization_time_us{}; std::vector inference_time_us{}; int32_t max_memory_kb = 0; bool ok = false; std::vector> metrics{}; BenchmarkResultT() = default; BenchmarkResultT(const BenchmarkResultT &o); BenchmarkResultT(BenchmarkResultT&&) FLATBUFFERS_NOEXCEPT = default; BenchmarkResultT &operator=(BenchmarkResultT o) FLATBUFFERS_NOEXCEPT; }; struct BenchmarkResult FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BenchmarkResultT NativeTableType; typedef BenchmarkResultBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INITIALIZATION_TIME_US = 4, VT_INFERENCE_TIME_US = 6, VT_MAX_MEMORY_KB = 8, VT_OK = 10, VT_METRICS = 12 }; const flatbuffers::Vector *initialization_time_us() const { return GetPointer *>(VT_INITIALIZATION_TIME_US); } const flatbuffers::Vector *inference_time_us() const { return GetPointer *>(VT_INFERENCE_TIME_US); } int32_t max_memory_kb() const { return 
GetField(VT_MAX_MEMORY_KB, 0); } bool ok() const { return GetField(VT_OK, 0) != 0; } const flatbuffers::Vector> *metrics() const { return GetPointer> *>(VT_METRICS); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_INITIALIZATION_TIME_US) && verifier.VerifyVector(initialization_time_us()) && VerifyOffset(verifier, VT_INFERENCE_TIME_US) && verifier.VerifyVector(inference_time_us()) && VerifyField(verifier, VT_MAX_MEMORY_KB, 4) && VerifyField(verifier, VT_OK, 1) && VerifyOffset(verifier, VT_METRICS) && verifier.VerifyVector(metrics()) && verifier.VerifyVectorOfTables(metrics()) && verifier.EndTable(); } BenchmarkResultT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BenchmarkResultT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BenchmarkResultBuilder { typedef BenchmarkResult Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_initialization_time_us(flatbuffers::Offset> initialization_time_us) { fbb_.AddOffset(BenchmarkResult::VT_INITIALIZATION_TIME_US, initialization_time_us); } void add_inference_time_us(flatbuffers::Offset> inference_time_us) { fbb_.AddOffset(BenchmarkResult::VT_INFERENCE_TIME_US, inference_time_us); } void add_max_memory_kb(int32_t max_memory_kb) { fbb_.AddElement(BenchmarkResult::VT_MAX_MEMORY_KB, max_memory_kb, 0); } void add_ok(bool ok) { fbb_.AddElement(BenchmarkResult::VT_OK, static_cast(ok), 0); } void add_metrics(flatbuffers::Offset>> metrics) { fbb_.AddOffset(BenchmarkResult::VT_METRICS, metrics); } explicit BenchmarkResultBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = 
flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBenchmarkResult( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset> initialization_time_us = 0, flatbuffers::Offset> inference_time_us = 0, int32_t max_memory_kb = 0, bool ok = false, flatbuffers::Offset>> metrics = 0) { BenchmarkResultBuilder builder_(_fbb); builder_.add_metrics(metrics); builder_.add_max_memory_kb(max_memory_kb); builder_.add_inference_time_us(inference_time_us); builder_.add_initialization_time_us(initialization_time_us); builder_.add_ok(ok); return builder_.Finish(); } inline flatbuffers::Offset CreateBenchmarkResultDirect( flatbuffers::FlatBufferBuilder &_fbb, const std::vector *initialization_time_us = nullptr, const std::vector *inference_time_us = nullptr, int32_t max_memory_kb = 0, bool ok = false, const std::vector> *metrics = nullptr) { auto initialization_time_us__ = initialization_time_us ? _fbb.CreateVector(*initialization_time_us) : 0; auto inference_time_us__ = inference_time_us ? _fbb.CreateVector(*inference_time_us) : 0; auto metrics__ = metrics ? 
_fbb.CreateVector>(*metrics) : 0; return tflite::CreateBenchmarkResult( _fbb, initialization_time_us__, inference_time_us__, max_memory_kb, ok, metrics__); } flatbuffers::Offset CreateBenchmarkResult(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct ErrorCodeT : public flatbuffers::NativeTable { typedef ErrorCode TableType; tflite::Delegate source = tflite::Delegate_NONE; int32_t tflite_error = 0; int64_t underlying_api_error = 0; }; struct ErrorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ErrorCodeT NativeTableType; typedef ErrorCodeBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_SOURCE = 4, VT_TFLITE_ERROR = 6, VT_UNDERLYING_API_ERROR = 8 }; tflite::Delegate source() const { return static_cast(GetField(VT_SOURCE, 0)); } int32_t tflite_error() const { return GetField(VT_TFLITE_ERROR, 0); } int64_t underlying_api_error() const { return GetField(VT_UNDERLYING_API_ERROR, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_SOURCE, 4) && VerifyField(verifier, VT_TFLITE_ERROR, 4) && VerifyField(verifier, VT_UNDERLYING_API_ERROR, 8) && verifier.EndTable(); } ErrorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(ErrorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct ErrorCodeBuilder { typedef ErrorCode Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_source(tflite::Delegate source) { fbb_.AddElement(ErrorCode::VT_SOURCE, static_cast(source), 0); } void add_tflite_error(int32_t tflite_error) { fbb_.AddElement(ErrorCode::VT_TFLITE_ERROR, tflite_error, 0); } void 
add_underlying_api_error(int64_t underlying_api_error) { fbb_.AddElement(ErrorCode::VT_UNDERLYING_API_ERROR, underlying_api_error, 0); } explicit ErrorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateErrorCode( flatbuffers::FlatBufferBuilder &_fbb, tflite::Delegate source = tflite::Delegate_NONE, int32_t tflite_error = 0, int64_t underlying_api_error = 0) { ErrorCodeBuilder builder_(_fbb); builder_.add_underlying_api_error(underlying_api_error); builder_.add_tflite_error(tflite_error); builder_.add_source(source); return builder_.Finish(); } flatbuffers::Offset CreateErrorCode(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BenchmarkErrorT : public flatbuffers::NativeTable { typedef BenchmarkError TableType; tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN; int32_t exit_code = 0; int32_t signal = 0; std::vector> error_code{}; int32_t mini_benchmark_error_code = 0; BenchmarkErrorT() = default; BenchmarkErrorT(const BenchmarkErrorT &o); BenchmarkErrorT(BenchmarkErrorT&&) FLATBUFFERS_NOEXCEPT = default; BenchmarkErrorT &operator=(BenchmarkErrorT o) FLATBUFFERS_NOEXCEPT; }; struct BenchmarkError FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BenchmarkErrorT NativeTableType; typedef BenchmarkErrorBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_STAGE = 4, VT_EXIT_CODE = 6, VT_SIGNAL = 8, VT_ERROR_CODE = 10, VT_MINI_BENCHMARK_ERROR_CODE = 12 }; tflite::BenchmarkStage stage() const { return static_cast(GetField(VT_STAGE, 0)); } int32_t exit_code() const { return GetField(VT_EXIT_CODE, 0); } int32_t signal() const { return GetField(VT_SIGNAL, 0); } const flatbuffers::Vector> *error_code() const { return GetPointer> 
*>(VT_ERROR_CODE); } int32_t mini_benchmark_error_code() const { return GetField(VT_MINI_BENCHMARK_ERROR_CODE, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_STAGE, 4) && VerifyField(verifier, VT_EXIT_CODE, 4) && VerifyField(verifier, VT_SIGNAL, 4) && VerifyOffset(verifier, VT_ERROR_CODE) && verifier.VerifyVector(error_code()) && verifier.VerifyVectorOfTables(error_code()) && VerifyField(verifier, VT_MINI_BENCHMARK_ERROR_CODE, 4) && verifier.EndTable(); } BenchmarkErrorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BenchmarkErrorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BenchmarkErrorBuilder { typedef BenchmarkError Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_stage(tflite::BenchmarkStage stage) { fbb_.AddElement(BenchmarkError::VT_STAGE, static_cast(stage), 0); } void add_exit_code(int32_t exit_code) { fbb_.AddElement(BenchmarkError::VT_EXIT_CODE, exit_code, 0); } void add_signal(int32_t signal) { fbb_.AddElement(BenchmarkError::VT_SIGNAL, signal, 0); } void add_error_code(flatbuffers::Offset>> error_code) { fbb_.AddOffset(BenchmarkError::VT_ERROR_CODE, error_code); } void add_mini_benchmark_error_code(int32_t mini_benchmark_error_code) { fbb_.AddElement(BenchmarkError::VT_MINI_BENCHMARK_ERROR_CODE, mini_benchmark_error_code, 0); } explicit BenchmarkErrorBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBenchmarkError( flatbuffers::FlatBufferBuilder &_fbb, tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN, 
// NOTE(review): flatc-generated code ("do not modify") that has been corrupted
// in transit: every template-argument list was stripped (e.g.
// `flatbuffers::Offset>>`, bare `GetField(...)` / `GetPointer(...)`,
// `std::unique_ptr tflite_settings{}`), so this text no longer compiles as-is.
// Do not hand-patch the angle brackets; regenerate this header from the
// original `configuration.fbs` schema with `flatc --cpp --gen-object-api`.
// Code below is preserved byte-for-byte; only comment lines were added.
//
// This span contains (definitions flow across physical lines):
//  - tail of CreateBenchmarkError / CreateBenchmarkErrorDirect (head of the
//    first signature lies before this chunk and is left untouched);
//  - BenchmarkEvent: native table (BenchmarkEventT), accessor/Verify struct,
//    BenchmarkEventBuilder, CreateBenchmarkEvent;
//  - BestAccelerationDecision: native table, table struct, builder, Create;
//  - BenchmarkInitializationFailure: native table, table struct, builder, Create;
//  - MiniBenchmarkEvent: native table, table struct, builder, Create;
//  - start of ModelFile (native table, accessors, beginning of ModelFileBuilder).
int32_t exit_code = 0, int32_t signal = 0, flatbuffers::Offset>> error_code = 0, int32_t mini_benchmark_error_code = 0) { BenchmarkErrorBuilder builder_(_fbb); builder_.add_mini_benchmark_error_code(mini_benchmark_error_code); builder_.add_error_code(error_code); builder_.add_signal(signal); builder_.add_exit_code(exit_code); builder_.add_stage(stage); return builder_.Finish(); } inline flatbuffers::Offset CreateBenchmarkErrorDirect( flatbuffers::FlatBufferBuilder &_fbb, tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN, int32_t exit_code = 0, int32_t signal = 0, const std::vector> *error_code = nullptr, int32_t mini_benchmark_error_code = 0) { auto error_code__ = error_code ? _fbb.CreateVector>(*error_code) : 0; return tflite::CreateBenchmarkError( _fbb, stage, exit_code, signal, error_code__, mini_benchmark_error_code); } flatbuffers::Offset CreateBenchmarkError(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BenchmarkEventT : public flatbuffers::NativeTable { typedef BenchmarkEvent TableType; std::unique_ptr tflite_settings{}; tflite::BenchmarkEventType event_type = tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE; std::unique_ptr result{}; std::unique_ptr error{}; int64_t boottime_us = 0; int64_t wallclock_us = 0; BenchmarkEventT() = default; BenchmarkEventT(const BenchmarkEventT &o); BenchmarkEventT(BenchmarkEventT&&) FLATBUFFERS_NOEXCEPT = default; BenchmarkEventT &operator=(BenchmarkEventT o) FLATBUFFERS_NOEXCEPT; }; struct BenchmarkEvent FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BenchmarkEventT NativeTableType; typedef BenchmarkEventBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_TFLITE_SETTINGS = 4, VT_EVENT_TYPE = 6, VT_RESULT = 8, VT_ERROR = 10, VT_BOOTTIME_US = 12, VT_WALLCLOCK_US = 14 }; const tflite::TFLiteSettings *tflite_settings() const { return GetPointer(VT_TFLITE_SETTINGS); }
// (BenchmarkEvent struct continues: remaining field accessors, Verify, and
// the object-API UnPack/UnPackTo/Pack declarations; then BenchmarkEventBuilder.)
tflite::BenchmarkEventType event_type() const { return static_cast(GetField(VT_EVENT_TYPE, 0)); } const tflite::BenchmarkResult *result() const { return GetPointer(VT_RESULT); } const tflite::BenchmarkError *error() const { return GetPointer(VT_ERROR); } int64_t boottime_us() const { return GetField(VT_BOOTTIME_US, 0); } int64_t wallclock_us() const { return GetField(VT_WALLCLOCK_US, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_TFLITE_SETTINGS) && verifier.VerifyTable(tflite_settings()) && VerifyField(verifier, VT_EVENT_TYPE, 4) && VerifyOffset(verifier, VT_RESULT) && verifier.VerifyTable(result()) && VerifyOffset(verifier, VT_ERROR) && verifier.VerifyTable(error()) && VerifyField(verifier, VT_BOOTTIME_US, 8) && VerifyField(verifier, VT_WALLCLOCK_US, 8) && verifier.EndTable(); } BenchmarkEventT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BenchmarkEventBuilder { typedef BenchmarkEvent Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_tflite_settings(flatbuffers::Offset tflite_settings) { fbb_.AddOffset(BenchmarkEvent::VT_TFLITE_SETTINGS, tflite_settings); } void add_event_type(tflite::BenchmarkEventType event_type) { fbb_.AddElement(BenchmarkEvent::VT_EVENT_TYPE, static_cast(event_type), 0); } void add_result(flatbuffers::Offset result) { fbb_.AddOffset(BenchmarkEvent::VT_RESULT, result); } void add_error(flatbuffers::Offset error) { fbb_.AddOffset(BenchmarkEvent::VT_ERROR, error); } void add_boottime_us(int64_t boottime_us) { fbb_.AddElement(BenchmarkEvent::VT_BOOTTIME_US, boottime_us, 0); } void add_wallclock_us(int64_t wallclock_us) {
// (CreateBenchmarkEvent adds fields largest-first, the usual flatc ordering;
// then BestAccelerationDecision native table + table struct begin.)
fbb_.AddElement(BenchmarkEvent::VT_WALLCLOCK_US, wallclock_us, 0); } explicit BenchmarkEventBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBenchmarkEvent( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset tflite_settings = 0, tflite::BenchmarkEventType event_type = tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE, flatbuffers::Offset result = 0, flatbuffers::Offset error = 0, int64_t boottime_us = 0, int64_t wallclock_us = 0) { BenchmarkEventBuilder builder_(_fbb); builder_.add_wallclock_us(wallclock_us); builder_.add_boottime_us(boottime_us); builder_.add_error(error); builder_.add_result(result); builder_.add_event_type(event_type); builder_.add_tflite_settings(tflite_settings); return builder_.Finish(); } flatbuffers::Offset CreateBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BestAccelerationDecisionT : public flatbuffers::NativeTable { typedef BestAccelerationDecision TableType; int32_t number_of_source_events = 0; std::unique_ptr min_latency_event{}; int64_t min_inference_time_us = 0; BestAccelerationDecisionT() = default; BestAccelerationDecisionT(const BestAccelerationDecisionT &o); BestAccelerationDecisionT(BestAccelerationDecisionT&&) FLATBUFFERS_NOEXCEPT = default; BestAccelerationDecisionT &operator=(BestAccelerationDecisionT o) FLATBUFFERS_NOEXCEPT; }; struct BestAccelerationDecision FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BestAccelerationDecisionT NativeTableType; typedef BestAccelerationDecisionBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_NUMBER_OF_SOURCE_EVENTS = 4, VT_MIN_LATENCY_EVENT = 6, VT_MIN_INFERENCE_TIME_US = 8 }; int32_t number_of_source_events() const { return
GetField(VT_NUMBER_OF_SOURCE_EVENTS, 0); } const tflite::BenchmarkEvent *min_latency_event() const { return GetPointer(VT_MIN_LATENCY_EVENT); } int64_t min_inference_time_us() const { return GetField(VT_MIN_INFERENCE_TIME_US, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_NUMBER_OF_SOURCE_EVENTS, 4) && VerifyOffset(verifier, VT_MIN_LATENCY_EVENT) && verifier.VerifyTable(min_latency_event()) && VerifyField(verifier, VT_MIN_INFERENCE_TIME_US, 8) && verifier.EndTable(); } BestAccelerationDecisionT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BestAccelerationDecisionT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BestAccelerationDecisionBuilder { typedef BestAccelerationDecision Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_number_of_source_events(int32_t number_of_source_events) { fbb_.AddElement(BestAccelerationDecision::VT_NUMBER_OF_SOURCE_EVENTS, number_of_source_events, 0); } void add_min_latency_event(flatbuffers::Offset min_latency_event) { fbb_.AddOffset(BestAccelerationDecision::VT_MIN_LATENCY_EVENT, min_latency_event); } void add_min_inference_time_us(int64_t min_inference_time_us) { fbb_.AddElement(BestAccelerationDecision::VT_MIN_INFERENCE_TIME_US, min_inference_time_us, 0); } explicit BestAccelerationDecisionBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBestAccelerationDecision( flatbuffers::FlatBufferBuilder &_fbb, int32_t number_of_source_events = 0, flatbuffers::Offset min_latency_event = 0, int64_t
// (BenchmarkInitializationFailure follows: simple one-field table with no
// hand-written copy/move members — it needs none, all members are scalar.)
min_inference_time_us = 0) { BestAccelerationDecisionBuilder builder_(_fbb); builder_.add_min_inference_time_us(min_inference_time_us); builder_.add_min_latency_event(min_latency_event); builder_.add_number_of_source_events(number_of_source_events); return builder_.Finish(); } flatbuffers::Offset CreateBestAccelerationDecision(flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BenchmarkInitializationFailureT : public flatbuffers::NativeTable { typedef BenchmarkInitializationFailure TableType; int32_t initialization_status = 0; }; struct BenchmarkInitializationFailure FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BenchmarkInitializationFailureT NativeTableType; typedef BenchmarkInitializationFailureBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_INITIALIZATION_STATUS = 4 }; int32_t initialization_status() const { return GetField(VT_INITIALIZATION_STATUS, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_INITIALIZATION_STATUS, 4) && verifier.EndTable(); } BenchmarkInitializationFailureT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BenchmarkInitializationFailureT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BenchmarkInitializationFailureBuilder { typedef BenchmarkInitializationFailure Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_initialization_status(int32_t initialization_status) { fbb_.AddElement(BenchmarkInitializationFailure::VT_INITIALIZATION_STATUS, initialization_status, 0); } explicit BenchmarkInitializationFailureBuilder(flatbuffers::FlatBufferBuilder
&_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBenchmarkInitializationFailure( flatbuffers::FlatBufferBuilder &_fbb, int32_t initialization_status = 0) { BenchmarkInitializationFailureBuilder builder_(_fbb); builder_.add_initialization_status(initialization_status); return builder_.Finish(); } flatbuffers::Offset CreateBenchmarkInitializationFailure(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct MiniBenchmarkEventT : public flatbuffers::NativeTable { typedef MiniBenchmarkEvent TableType; bool is_log_flushing_event = false; std::unique_ptr best_acceleration_decision{}; std::unique_ptr initialization_failure{}; std::unique_ptr benchmark_event{}; MiniBenchmarkEventT() = default; MiniBenchmarkEventT(const MiniBenchmarkEventT &o); MiniBenchmarkEventT(MiniBenchmarkEventT&&) FLATBUFFERS_NOEXCEPT = default; MiniBenchmarkEventT &operator=(MiniBenchmarkEventT o) FLATBUFFERS_NOEXCEPT; }; struct MiniBenchmarkEvent FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef MiniBenchmarkEventT NativeTableType; typedef MiniBenchmarkEventBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_IS_LOG_FLUSHING_EVENT = 4, VT_BEST_ACCELERATION_DECISION = 6, VT_INITIALIZATION_FAILURE = 8, VT_BENCHMARK_EVENT = 10 }; bool is_log_flushing_event() const { return GetField(VT_IS_LOG_FLUSHING_EVENT, 0) != 0; } const tflite::BestAccelerationDecision *best_acceleration_decision() const { return GetPointer(VT_BEST_ACCELERATION_DECISION); } const tflite::BenchmarkInitializationFailure *initialization_failure() const { return GetPointer(VT_INITIALIZATION_FAILURE); } const tflite::BenchmarkEvent *benchmark_event() const { return GetPointer(VT_BENCHMARK_EVENT); } bool Verify(flatbuffers::Verifier &verifier)
const { return VerifyTableStart(verifier) && VerifyField(verifier, VT_IS_LOG_FLUSHING_EVENT, 1) && VerifyOffset(verifier, VT_BEST_ACCELERATION_DECISION) && verifier.VerifyTable(best_acceleration_decision()) && VerifyOffset(verifier, VT_INITIALIZATION_FAILURE) && verifier.VerifyTable(initialization_failure()) && VerifyOffset(verifier, VT_BENCHMARK_EVENT) && verifier.VerifyTable(benchmark_event()) && verifier.EndTable(); } MiniBenchmarkEventT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(MiniBenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct MiniBenchmarkEventBuilder { typedef MiniBenchmarkEvent Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_is_log_flushing_event(bool is_log_flushing_event) { fbb_.AddElement(MiniBenchmarkEvent::VT_IS_LOG_FLUSHING_EVENT, static_cast(is_log_flushing_event), 0); } void add_best_acceleration_decision(flatbuffers::Offset best_acceleration_decision) { fbb_.AddOffset(MiniBenchmarkEvent::VT_BEST_ACCELERATION_DECISION, best_acceleration_decision); } void add_initialization_failure(flatbuffers::Offset initialization_failure) { fbb_.AddOffset(MiniBenchmarkEvent::VT_INITIALIZATION_FAILURE, initialization_failure); } void add_benchmark_event(flatbuffers::Offset benchmark_event) { fbb_.AddOffset(MiniBenchmarkEvent::VT_BENCHMARK_EVENT, benchmark_event); } explicit MiniBenchmarkEventBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateMiniBenchmarkEvent( flatbuffers::FlatBufferBuilder &_fbb, bool is_log_flushing_event = false, flatbuffers::Offset best_acceleration_decision
= 0, flatbuffers::Offset initialization_failure = 0, flatbuffers::Offset benchmark_event = 0) { MiniBenchmarkEventBuilder builder_(_fbb); builder_.add_benchmark_event(benchmark_event); builder_.add_initialization_failure(initialization_failure); builder_.add_best_acceleration_decision(best_acceleration_decision); builder_.add_is_log_flushing_event(is_log_flushing_event); return builder_.Finish(); } flatbuffers::Offset CreateMiniBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct ModelFileT : public flatbuffers::NativeTable { typedef ModelFile TableType; std::string filename{}; int64_t fd = 0; int64_t offset = 0; int64_t length = 0; }; struct ModelFile FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef ModelFileT NativeTableType; typedef ModelFileBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_FILENAME = 4, VT_FD = 6, VT_OFFSET = 8, VT_LENGTH = 10 }; const flatbuffers::String *filename() const { return GetPointer(VT_FILENAME); } int64_t fd() const { return GetField(VT_FD, 0); } int64_t offset() const { return GetField(VT_OFFSET, 0); } int64_t length() const { return GetField(VT_LENGTH, 0); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_FILENAME) && verifier.VerifyString(filename()) && VerifyField(verifier, VT_FD, 8) && VerifyField(verifier, VT_OFFSET, 8) && VerifyField(verifier, VT_LENGTH, 8) && verifier.EndTable(); } ModelFileT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(ModelFileT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct ModelFileBuilder { typedef ModelFile Table; flatbuffers::FlatBufferBuilder &fbb_;
// NOTE(review): continuation of the corrupted flatc-generated header — all
// template-argument lists are stripped throughout (see e.g.
// `std::vector>`, `GetPointer> *>`), so this text will not compile; fix by
// regenerating from `configuration.fbs` with flatc, not by hand-editing.
// Code is preserved byte-for-byte; only comment lines were added.
//
// This span contains: the remainder of ModelFileBuilder plus
// CreateModelFile/CreateModelFileDirect; BenchmarkStoragePaths (native table,
// table struct with string accessors, builder, Create + Direct variant);
// MinibenchmarkSettings (native table, table struct, builder, Create + Direct
// variant); and the ComputeSettingsT operator==/operator!= plus the start of
// its copy constructor (deep-copies the unique_ptr-held sub-tables).
flatbuffers::uoffset_t start_; void add_filename(flatbuffers::Offset filename) { fbb_.AddOffset(ModelFile::VT_FILENAME, filename); } void add_fd(int64_t fd) { fbb_.AddElement(ModelFile::VT_FD, fd, 0); } void add_offset(int64_t offset) { fbb_.AddElement(ModelFile::VT_OFFSET, offset, 0); } void add_length(int64_t length) { fbb_.AddElement(ModelFile::VT_LENGTH, length, 0); } explicit ModelFileBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateModelFile( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset filename = 0, int64_t fd = 0, int64_t offset = 0, int64_t length = 0) { ModelFileBuilder builder_(_fbb); builder_.add_length(length); builder_.add_offset(offset); builder_.add_fd(fd); builder_.add_filename(filename); return builder_.Finish(); } inline flatbuffers::Offset CreateModelFileDirect( flatbuffers::FlatBufferBuilder &_fbb, const char *filename = nullptr, int64_t fd = 0, int64_t offset = 0, int64_t length = 0) { auto filename__ = filename ?
_fbb.CreateString(filename) : 0; return tflite::CreateModelFile( _fbb, filename__, fd, offset, length); } flatbuffers::Offset CreateModelFile(flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct BenchmarkStoragePathsT : public flatbuffers::NativeTable { typedef BenchmarkStoragePaths TableType; std::string storage_file_path{}; std::string data_directory_path{}; }; struct BenchmarkStoragePaths FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef BenchmarkStoragePathsT NativeTableType; typedef BenchmarkStoragePathsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_STORAGE_FILE_PATH = 4, VT_DATA_DIRECTORY_PATH = 6 }; const flatbuffers::String *storage_file_path() const { return GetPointer(VT_STORAGE_FILE_PATH); } const flatbuffers::String *data_directory_path() const { return GetPointer(VT_DATA_DIRECTORY_PATH); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_STORAGE_FILE_PATH) && verifier.VerifyString(storage_file_path()) && VerifyOffset(verifier, VT_DATA_DIRECTORY_PATH) && verifier.VerifyString(data_directory_path()) && verifier.EndTable(); } BenchmarkStoragePathsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(BenchmarkStoragePathsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct BenchmarkStoragePathsBuilder { typedef BenchmarkStoragePaths Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_storage_file_path(flatbuffers::Offset storage_file_path) { fbb_.AddOffset(BenchmarkStoragePaths::VT_STORAGE_FILE_PATH, storage_file_path); } void add_data_directory_path(flatbuffers::Offset data_directory_path) {
fbb_.AddOffset(BenchmarkStoragePaths::VT_DATA_DIRECTORY_PATH, data_directory_path); } explicit BenchmarkStoragePathsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateBenchmarkStoragePaths( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset storage_file_path = 0, flatbuffers::Offset data_directory_path = 0) { BenchmarkStoragePathsBuilder builder_(_fbb); builder_.add_data_directory_path(data_directory_path); builder_.add_storage_file_path(storage_file_path); return builder_.Finish(); } inline flatbuffers::Offset CreateBenchmarkStoragePathsDirect( flatbuffers::FlatBufferBuilder &_fbb, const char *storage_file_path = nullptr, const char *data_directory_path = nullptr) { auto storage_file_path__ = storage_file_path ? _fbb.CreateString(storage_file_path) : 0; auto data_directory_path__ = data_directory_path ?
_fbb.CreateString(data_directory_path) : 0; return tflite::CreateBenchmarkStoragePaths( _fbb, storage_file_path__, data_directory_path__); } flatbuffers::Offset CreateBenchmarkStoragePaths(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); struct MinibenchmarkSettingsT : public flatbuffers::NativeTable { typedef MinibenchmarkSettings TableType; std::vector> settings_to_test{}; std::unique_ptr model_file{}; std::unique_ptr storage_paths{}; MinibenchmarkSettingsT() = default; MinibenchmarkSettingsT(const MinibenchmarkSettingsT &o); MinibenchmarkSettingsT(MinibenchmarkSettingsT&&) FLATBUFFERS_NOEXCEPT = default; MinibenchmarkSettingsT &operator=(MinibenchmarkSettingsT o) FLATBUFFERS_NOEXCEPT; }; struct MinibenchmarkSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { typedef MinibenchmarkSettingsT NativeTableType; typedef MinibenchmarkSettingsBuilder Builder; enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { VT_SETTINGS_TO_TEST = 4, VT_MODEL_FILE = 6, VT_STORAGE_PATHS = 8 }; const flatbuffers::Vector> *settings_to_test() const { return GetPointer> *>(VT_SETTINGS_TO_TEST); } const tflite::ModelFile *model_file() const { return GetPointer(VT_MODEL_FILE); } const tflite::BenchmarkStoragePaths *storage_paths() const { return GetPointer(VT_STORAGE_PATHS); } bool Verify(flatbuffers::Verifier &verifier) const { return VerifyTableStart(verifier) && VerifyOffset(verifier, VT_SETTINGS_TO_TEST) && verifier.VerifyVector(settings_to_test()) && verifier.VerifyVectorOfTables(settings_to_test()) && VerifyOffset(verifier, VT_MODEL_FILE) && verifier.VerifyTable(model_file()) && VerifyOffset(verifier, VT_STORAGE_PATHS) && verifier.VerifyTable(storage_paths()) && verifier.EndTable(); } MinibenchmarkSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; void UnPackTo(MinibenchmarkSettingsT *_o, const flatbuffers::resolver_function_t *_resolver =
nullptr) const; static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); }; struct MinibenchmarkSettingsBuilder { typedef MinibenchmarkSettings Table; flatbuffers::FlatBufferBuilder &fbb_; flatbuffers::uoffset_t start_; void add_settings_to_test(flatbuffers::Offset>> settings_to_test) { fbb_.AddOffset(MinibenchmarkSettings::VT_SETTINGS_TO_TEST, settings_to_test); } void add_model_file(flatbuffers::Offset model_file) { fbb_.AddOffset(MinibenchmarkSettings::VT_MODEL_FILE, model_file); } void add_storage_paths(flatbuffers::Offset storage_paths) { fbb_.AddOffset(MinibenchmarkSettings::VT_STORAGE_PATHS, storage_paths); } explicit MinibenchmarkSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb) : fbb_(_fbb) { start_ = fbb_.StartTable(); } flatbuffers::Offset Finish() { const auto end = fbb_.EndTable(start_); auto o = flatbuffers::Offset(end); return o; } }; inline flatbuffers::Offset CreateMinibenchmarkSettings( flatbuffers::FlatBufferBuilder &_fbb, flatbuffers::Offset>> settings_to_test = 0, flatbuffers::Offset model_file = 0, flatbuffers::Offset storage_paths = 0) { MinibenchmarkSettingsBuilder builder_(_fbb); builder_.add_storage_paths(storage_paths); builder_.add_model_file(model_file); builder_.add_settings_to_test(settings_to_test); return builder_.Finish(); } inline flatbuffers::Offset CreateMinibenchmarkSettingsDirect( flatbuffers::FlatBufferBuilder &_fbb, const std::vector> *settings_to_test = nullptr, flatbuffers::Offset model_file = 0, flatbuffers::Offset storage_paths = 0) { auto settings_to_test__ = settings_to_test ?
_fbb.CreateVector>(*settings_to_test) : 0; return tflite::CreateMinibenchmarkSettings( _fbb, settings_to_test__, model_file, storage_paths); } flatbuffers::Offset CreateMinibenchmarkSettings(flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); inline bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) { return (lhs.preference == rhs.preference) && ((lhs.tflite_settings == rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings)) && (lhs.model_namespace_for_statistics == rhs.model_namespace_for_statistics) && (lhs.model_identifier_for_statistics == rhs.model_identifier_for_statistics) && ((lhs.settings_to_test_locally == rhs.settings_to_test_locally) || (lhs.settings_to_test_locally && rhs.settings_to_test_locally && *lhs.settings_to_test_locally == *rhs.settings_to_test_locally)); } inline bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) { return !(lhs == rhs); } inline ComputeSettingsT::ComputeSettingsT(const ComputeSettingsT &o) : preference(o.preference), tflite_settings((o.tflite_settings) ? new tflite::TFLiteSettingsT(*o.tflite_settings) : nullptr), model_namespace_for_statistics(o.model_namespace_for_statistics), model_identifier_for_statistics(o.model_identifier_for_statistics), settings_to_test_locally((o.settings_to_test_locally) ?
// NOTE(review): continuation of the corrupted flatc-generated header (template
// argument lists stripped throughout — e.g. bare `static_cast(...)`,
// `std::unique_ptr(...)` — so this text will not compile; regenerate from
// `configuration.fbs` with flatc rather than editing by hand).
// Code is preserved byte-for-byte; only comment lines were added.
//
// This span contains the object-API implementations:
//  - ComputeSettingsT copy-ctor tail, copy-and-swap operator=, and
//    ComputeSettings::UnPack/UnPackTo/Pack plus CreateComputeSettings
//    (strings serialized only when non-empty; sub-tables only when set);
//  - NNAPISettingsT operator==/!=, copy-ctor, operator=, and
//    NNAPISettings::UnPack/UnPackTo/Pack plus CreateNNAPISettings;
//  - the same trio (equality, UnPack/Pack, Create) for GPUSettings,
//    HexagonSettings, and XNNPackSettings;
//  - CoreMLSettingsT operator== — its operator!= is cut off at the end of
//    this chunk and continues in the next one, so it is left untouched here.
new tflite::MinibenchmarkSettingsT(*o.settings_to_test_locally) : nullptr) { } inline ComputeSettingsT &ComputeSettingsT::operator=(ComputeSettingsT o) FLATBUFFERS_NOEXCEPT { std::swap(preference, o.preference); std::swap(tflite_settings, o.tflite_settings); std::swap(model_namespace_for_statistics, o.model_namespace_for_statistics); std::swap(model_identifier_for_statistics, o.model_identifier_for_statistics); std::swap(settings_to_test_locally, o.settings_to_test_locally); return *this; } inline ComputeSettingsT *ComputeSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new ComputeSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void ComputeSettings::UnPackTo(ComputeSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = preference(); _o->preference = _e; } { auto _e = tflite_settings(); if (_e) { if(_o->tflite_settings) { _e->UnPackTo(_o->tflite_settings.get(), _resolver); } else { _o->tflite_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = model_namespace_for_statistics(); if (_e) _o->model_namespace_for_statistics = _e->str(); } { auto _e = model_identifier_for_statistics(); if (_e) _o->model_identifier_for_statistics = _e->str(); } { auto _e = settings_to_test_locally(); if (_e) { if(_o->settings_to_test_locally) { _e->UnPackTo(_o->settings_to_test_locally.get(), _resolver); } else { _o->settings_to_test_locally = std::unique_ptr(_e->UnPack(_resolver)); } } } } inline flatbuffers::Offset ComputeSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateComputeSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateComputeSettings(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs {
flatbuffers::FlatBufferBuilder *__fbb; const ComputeSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _preference = _o->preference; auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0; auto _model_namespace_for_statistics = _o->model_namespace_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_namespace_for_statistics); auto _model_identifier_for_statistics = _o->model_identifier_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_identifier_for_statistics); auto _settings_to_test_locally = _o->settings_to_test_locally ? CreateMinibenchmarkSettings(_fbb, _o->settings_to_test_locally.get(), _rehasher) : 0; return tflite::CreateComputeSettings( _fbb, _preference, _tflite_settings, _model_namespace_for_statistics, _model_identifier_for_statistics, _settings_to_test_locally); } inline bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) { return (lhs.accelerator_name == rhs.accelerator_name) && (lhs.cache_directory == rhs.cache_directory) && (lhs.model_token == rhs.model_token) && (lhs.execution_preference == rhs.execution_preference) && (lhs.no_of_nnapi_instances_to_cache == rhs.no_of_nnapi_instances_to_cache) && ((lhs.fallback_settings == rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings)) && (lhs.allow_nnapi_cpu_on_android_10_plus == rhs.allow_nnapi_cpu_on_android_10_plus) && (lhs.execution_priority == rhs.execution_priority) && (lhs.allow_dynamic_dimensions == rhs.allow_dynamic_dimensions) && (lhs.allow_fp16_precision_for_fp32 == rhs.allow_fp16_precision_for_fp32) && (lhs.use_burst_computation == rhs.use_burst_computation) && (lhs.support_library_handle == rhs.support_library_handle); } inline bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) { return !(lhs == rhs); } inline NNAPISettingsT::NNAPISettingsT(const
NNAPISettingsT &o) : accelerator_name(o.accelerator_name), cache_directory(o.cache_directory), model_token(o.model_token), execution_preference(o.execution_preference), no_of_nnapi_instances_to_cache(o.no_of_nnapi_instances_to_cache), fallback_settings((o.fallback_settings) ? new tflite::FallbackSettingsT(*o.fallback_settings) : nullptr), allow_nnapi_cpu_on_android_10_plus(o.allow_nnapi_cpu_on_android_10_plus), execution_priority(o.execution_priority), allow_dynamic_dimensions(o.allow_dynamic_dimensions), allow_fp16_precision_for_fp32(o.allow_fp16_precision_for_fp32), use_burst_computation(o.use_burst_computation), support_library_handle(o.support_library_handle) { } inline NNAPISettingsT &NNAPISettingsT::operator=(NNAPISettingsT o) FLATBUFFERS_NOEXCEPT { std::swap(accelerator_name, o.accelerator_name); std::swap(cache_directory, o.cache_directory); std::swap(model_token, o.model_token); std::swap(execution_preference, o.execution_preference); std::swap(no_of_nnapi_instances_to_cache, o.no_of_nnapi_instances_to_cache); std::swap(fallback_settings, o.fallback_settings); std::swap(allow_nnapi_cpu_on_android_10_plus, o.allow_nnapi_cpu_on_android_10_plus); std::swap(execution_priority, o.execution_priority); std::swap(allow_dynamic_dimensions, o.allow_dynamic_dimensions); std::swap(allow_fp16_precision_for_fp32, o.allow_fp16_precision_for_fp32); std::swap(use_burst_computation, o.use_burst_computation); std::swap(support_library_handle, o.support_library_handle); return *this; } inline NNAPISettingsT *NNAPISettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new NNAPISettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void NNAPISettings::UnPackTo(NNAPISettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = accelerator_name(); if (_e) _o->accelerator_name = _e->str(); } { auto _e = cache_directory(); if (_e) _o->cache_directory = _e->str(); }
{ auto _e = model_token(); if (_e) _o->model_token = _e->str(); } { auto _e = execution_preference(); _o->execution_preference = _e; } { auto _e = no_of_nnapi_instances_to_cache(); _o->no_of_nnapi_instances_to_cache = _e; } { auto _e = fallback_settings(); if (_e) { if(_o->fallback_settings) { _e->UnPackTo(_o->fallback_settings.get(), _resolver); } else { _o->fallback_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = allow_nnapi_cpu_on_android_10_plus(); _o->allow_nnapi_cpu_on_android_10_plus = _e; } { auto _e = execution_priority(); _o->execution_priority = _e; } { auto _e = allow_dynamic_dimensions(); _o->allow_dynamic_dimensions = _e; } { auto _e = allow_fp16_precision_for_fp32(); _o->allow_fp16_precision_for_fp32 = _e; } { auto _e = use_burst_computation(); _o->use_burst_computation = _e; } { auto _e = support_library_handle(); _o->support_library_handle = _e; } } inline flatbuffers::Offset NNAPISettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateNNAPISettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateNNAPISettings(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NNAPISettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _accelerator_name = _o->accelerator_name.empty() ? 0 : _fbb.CreateString(_o->accelerator_name); auto _cache_directory = _o->cache_directory.empty() ? 0 : _fbb.CreateString(_o->cache_directory); auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token); auto _execution_preference = _o->execution_preference; auto _no_of_nnapi_instances_to_cache = _o->no_of_nnapi_instances_to_cache; auto _fallback_settings = _o->fallback_settings ?
CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0; auto _allow_nnapi_cpu_on_android_10_plus = _o->allow_nnapi_cpu_on_android_10_plus; auto _execution_priority = _o->execution_priority; auto _allow_dynamic_dimensions = _o->allow_dynamic_dimensions; auto _allow_fp16_precision_for_fp32 = _o->allow_fp16_precision_for_fp32; auto _use_burst_computation = _o->use_burst_computation; auto _support_library_handle = _o->support_library_handle; return tflite::CreateNNAPISettings( _fbb, _accelerator_name, _cache_directory, _model_token, _execution_preference, _no_of_nnapi_instances_to_cache, _fallback_settings, _allow_nnapi_cpu_on_android_10_plus, _execution_priority, _allow_dynamic_dimensions, _allow_fp16_precision_for_fp32, _use_burst_computation, _support_library_handle); } inline bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs) { return (lhs.is_precision_loss_allowed == rhs.is_precision_loss_allowed) && (lhs.enable_quantized_inference == rhs.enable_quantized_inference) && (lhs.force_backend == rhs.force_backend) && (lhs.inference_priority1 == rhs.inference_priority1) && (lhs.inference_priority2 == rhs.inference_priority2) && (lhs.inference_priority3 == rhs.inference_priority3) && (lhs.inference_preference == rhs.inference_preference) && (lhs.cache_directory == rhs.cache_directory) && (lhs.model_token == rhs.model_token); } inline bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs) { return !(lhs == rhs); } inline GPUSettingsT *GPUSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new GPUSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void GPUSettings::UnPackTo(GPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = is_precision_loss_allowed(); _o->is_precision_loss_allowed = _e; } { auto _e = enable_quantized_inference(); _o->enable_quantized_inference = _e; } { auto _e =
force_backend(); _o->force_backend = _e; } { auto _e = inference_priority1(); _o->inference_priority1 = _e; } { auto _e = inference_priority2(); _o->inference_priority2 = _e; } { auto _e = inference_priority3(); _o->inference_priority3 = _e; } { auto _e = inference_preference(); _o->inference_preference = _e; } { auto _e = cache_directory(); if (_e) _o->cache_directory = _e->str(); } { auto _e = model_token(); if (_e) _o->model_token = _e->str(); } } inline flatbuffers::Offset GPUSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateGPUSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateGPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GPUSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _is_precision_loss_allowed = _o->is_precision_loss_allowed; auto _enable_quantized_inference = _o->enable_quantized_inference; auto _force_backend = _o->force_backend; auto _inference_priority1 = _o->inference_priority1; auto _inference_priority2 = _o->inference_priority2; auto _inference_priority3 = _o->inference_priority3; auto _inference_preference = _o->inference_preference; auto _cache_directory = _o->cache_directory.empty() ? 0 : _fbb.CreateString(_o->cache_directory); auto _model_token = _o->model_token.empty() ?
0 : _fbb.CreateString(_o->model_token); return tflite::CreateGPUSettings( _fbb, _is_precision_loss_allowed, _enable_quantized_inference, _force_backend, _inference_priority1, _inference_priority2, _inference_priority3, _inference_preference, _cache_directory, _model_token); } inline bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) { return (lhs.debug_level == rhs.debug_level) && (lhs.powersave_level == rhs.powersave_level) && (lhs.print_graph_profile == rhs.print_graph_profile) && (lhs.print_graph_debug == rhs.print_graph_debug); } inline bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) { return !(lhs == rhs); } inline HexagonSettingsT *HexagonSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new HexagonSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void HexagonSettings::UnPackTo(HexagonSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = debug_level(); _o->debug_level = _e; } { auto _e = powersave_level(); _o->powersave_level = _e; } { auto _e = print_graph_profile(); _o->print_graph_profile = _e; } { auto _e = print_graph_debug(); _o->print_graph_debug = _e; } } inline flatbuffers::Offset HexagonSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateHexagonSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateHexagonSettings(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HexagonSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _debug_level = _o->debug_level; auto _powersave_level = _o->powersave_level; auto _print_graph_profile =
_o->print_graph_profile; auto _print_graph_debug = _o->print_graph_debug; return tflite::CreateHexagonSettings( _fbb, _debug_level, _powersave_level, _print_graph_profile, _print_graph_debug); } inline bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) { return (lhs.num_threads == rhs.num_threads) && (lhs.flags == rhs.flags); } inline bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) { return !(lhs == rhs); } inline XNNPackSettingsT *XNNPackSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new XNNPackSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void XNNPackSettings::UnPackTo(XNNPackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = num_threads(); _o->num_threads = _e; } { auto _e = flags(); _o->flags = _e; } } inline flatbuffers::Offset XNNPackSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateXNNPackSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateXNNPackSettings(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const XNNPackSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _num_threads = _o->num_threads; auto _flags = _o->flags; return tflite::CreateXNNPackSettings( _fbb, _num_threads, _flags); } inline bool operator==(const CoreMLSettingsT &lhs, const CoreMLSettingsT &rhs) { return (lhs.enabled_devices == rhs.enabled_devices) && (lhs.coreml_version == rhs.coreml_version) && (lhs.max_delegated_partitions == rhs.max_delegated_partitions) && (lhs.min_nodes_per_partition == rhs.min_nodes_per_partition); } inline bool operator!=(const CoreMLSettingsT
// Generated object-API helpers for CoreMLSettings, EdgeTpuDeviceSpec and
// EdgeTpuInactivePowerConfig. Pattern per table: operator==/!= comparing every
// field, UnPack/UnPackTo copying table fields into the native ...T struct
// (strings via _e->str(), string vectors element-by-element), and Pack/Create*
// serializing back (empty strings/vectors become offset 0, i.e. field absent).
// NOTE(review): template argument lists look stripped in this rendering
// (`std::unique_ptr(new CoreMLSettingsT())`, bare `flatbuffers::Offset`) —
// likely extraction loss, not the real generated text. Regenerate with flatc
// instead of hand-editing.
&lhs, const CoreMLSettingsT &rhs) { return !(lhs == rhs); } inline CoreMLSettingsT *CoreMLSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new CoreMLSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void CoreMLSettings::UnPackTo(CoreMLSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = enabled_devices(); _o->enabled_devices = _e; } { auto _e = coreml_version(); _o->coreml_version = _e; } { auto _e = max_delegated_partitions(); _o->max_delegated_partitions = _e; } { auto _e = min_nodes_per_partition(); _o->min_nodes_per_partition = _e; } } inline flatbuffers::Offset CoreMLSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateCoreMLSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateCoreMLSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoreMLSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CoreMLSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _enabled_devices = _o->enabled_devices; auto _coreml_version = _o->coreml_version; auto _max_delegated_partitions = _o->max_delegated_partitions; auto _min_nodes_per_partition = _o->min_nodes_per_partition; return tflite::CreateCoreMLSettings( _fbb, _enabled_devices, _coreml_version, _max_delegated_partitions, _min_nodes_per_partition); } inline bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) { return (lhs.platform_type == rhs.platform_type) && (lhs.num_chips == rhs.num_chips) && (lhs.device_paths == rhs.device_paths) && (lhs.chip_family == rhs.chip_family); } inline bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) { return !(lhs == rhs); } inline 
EdgeTpuDeviceSpecT *EdgeTpuDeviceSpec::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new EdgeTpuDeviceSpecT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void EdgeTpuDeviceSpec::UnPackTo(EdgeTpuDeviceSpecT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = platform_type(); _o->platform_type = _e; } { auto _e = num_chips(); _o->num_chips = _e; } { auto _e = device_paths(); if (_e) { _o->device_paths.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->device_paths[_i] = _e->Get(_i)->str(); } } } { auto _e = chip_family(); _o->chip_family = _e; } } inline flatbuffers::Offset EdgeTpuDeviceSpec::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateEdgeTpuDeviceSpec(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateEdgeTpuDeviceSpec(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuDeviceSpecT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _platform_type = _o->platform_type; auto _num_chips = _o->num_chips; auto _device_paths = _o->device_paths.size() ? 
_fbb.CreateVectorOfStrings(_o->device_paths) : 0; auto _chip_family = _o->chip_family; return tflite::CreateEdgeTpuDeviceSpec( _fbb, _platform_type, _num_chips, _device_paths, _chip_family); } inline bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) { return (lhs.inactive_power_state == rhs.inactive_power_state) && (lhs.inactive_timeout_us == rhs.inactive_timeout_us); } inline bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) { return !(lhs == rhs); } inline EdgeTpuInactivePowerConfigT *EdgeTpuInactivePowerConfig::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new EdgeTpuInactivePowerConfigT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void EdgeTpuInactivePowerConfig::UnPackTo(EdgeTpuInactivePowerConfigT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = inactive_power_state(); _o->inactive_power_state = _e; } { auto _e = inactive_timeout_us(); _o->inactive_timeout_us = _e; } } inline flatbuffers::Offset EdgeTpuInactivePowerConfig::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateEdgeTpuInactivePowerConfig(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateEdgeTpuInactivePowerConfig(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuInactivePowerConfigT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _inactive_power_state = _o->inactive_power_state; auto _inactive_timeout_us = _o->inactive_timeout_us; return tflite::CreateEdgeTpuInactivePowerConfig( _fbb, _inactive_power_state, _inactive_timeout_us); } inline bool 
// Generated object-API helpers for EdgeTpuSettings (a table with a nested
// sub-table, a vector of sub-tables, and a string field):
//  - operator== compares the edgetpu_device_spec sub-object by pointer equality
//    OR deep equality when both sides are non-null.
//  - The copy constructor deep-copies edgetpu_device_spec and every element of
//    inactive_power_configs (null elements stay null).
//  - operator= is the copy-and-swap form (parameter taken by value, members
//    swapped), which serves as both copy- and move-assignment.
//  - UnPackTo reuses existing sub-objects when present (UnPackTo into them)
//    and otherwise allocates via UnPack; CreateEdgeTpuSettings serializes the
//    sub-table vector through a _VectorArgs trampoline lambda.
// The tail of this region begins CoralSettings' ==/!=/UnPack/UnPackTo.
// NOTE(review): template argument lists appear stripped here (e.g.
// `std::unique_ptr(_e->Get(_i)->UnPack(_resolver))`, `_fbb.CreateVector> (`)
// — presumably extraction loss. Do not hand-edit; regenerate with flatc.
operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) { return (lhs.inference_power_state == rhs.inference_power_state) && (lhs.inactive_power_configs == rhs.inactive_power_configs) && (lhs.inference_priority == rhs.inference_priority) && ((lhs.edgetpu_device_spec == rhs.edgetpu_device_spec) || (lhs.edgetpu_device_spec && rhs.edgetpu_device_spec && *lhs.edgetpu_device_spec == *rhs.edgetpu_device_spec)) && (lhs.model_token == rhs.model_token) && (lhs.float_truncation_type == rhs.float_truncation_type) && (lhs.qos_class == rhs.qos_class); } inline bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) { return !(lhs == rhs); } inline EdgeTpuSettingsT::EdgeTpuSettingsT(const EdgeTpuSettingsT &o) : inference_power_state(o.inference_power_state), inference_priority(o.inference_priority), edgetpu_device_spec((o.edgetpu_device_spec) ? new tflite::EdgeTpuDeviceSpecT(*o.edgetpu_device_spec) : nullptr), model_token(o.model_token), float_truncation_type(o.float_truncation_type), qos_class(o.qos_class) { inactive_power_configs.reserve(o.inactive_power_configs.size()); for (const auto &v : o.inactive_power_configs) { inactive_power_configs.emplace_back((v) ? 
new tflite::EdgeTpuInactivePowerConfigT(*v) : nullptr); } } inline EdgeTpuSettingsT &EdgeTpuSettingsT::operator=(EdgeTpuSettingsT o) FLATBUFFERS_NOEXCEPT { std::swap(inference_power_state, o.inference_power_state); std::swap(inactive_power_configs, o.inactive_power_configs); std::swap(inference_priority, o.inference_priority); std::swap(edgetpu_device_spec, o.edgetpu_device_spec); std::swap(model_token, o.model_token); std::swap(float_truncation_type, o.float_truncation_type); std::swap(qos_class, o.qos_class); return *this; } inline EdgeTpuSettingsT *EdgeTpuSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new EdgeTpuSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void EdgeTpuSettings::UnPackTo(EdgeTpuSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = inference_power_state(); _o->inference_power_state = _e; } { auto _e = inactive_power_configs(); if (_e) { _o->inactive_power_configs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inactive_power_configs[_i]) { _e->Get(_i)->UnPackTo(_o->inactive_power_configs[_i].get(), _resolver); } else { _o->inactive_power_configs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } { auto _e = inference_priority(); _o->inference_priority = _e; } { auto _e = edgetpu_device_spec(); if (_e) { if(_o->edgetpu_device_spec) { _e->UnPackTo(_o->edgetpu_device_spec.get(), _resolver); } else { _o->edgetpu_device_spec = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = model_token(); if (_e) _o->model_token = _e->str(); } { auto _e = float_truncation_type(); _o->float_truncation_type = _e; } { auto _e = qos_class(); _o->qos_class = _e; } } inline flatbuffers::Offset EdgeTpuSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateEdgeTpuSettings(_fbb, _o, 
_rehasher); } inline flatbuffers::Offset CreateEdgeTpuSettings(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _inference_power_state = _o->inference_power_state; auto _inactive_power_configs = _o->inactive_power_configs.size() ? _fbb.CreateVector> (_o->inactive_power_configs.size(), [](size_t i, _VectorArgs *__va) { return CreateEdgeTpuInactivePowerConfig(*__va->__fbb, __va->__o->inactive_power_configs[i].get(), __va->__rehasher); }, &_va ) : 0; auto _inference_priority = _o->inference_priority; auto _edgetpu_device_spec = _o->edgetpu_device_spec ? CreateEdgeTpuDeviceSpec(_fbb, _o->edgetpu_device_spec.get(), _rehasher) : 0; auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token); auto _float_truncation_type = _o->float_truncation_type; auto _qos_class = _o->qos_class; return tflite::CreateEdgeTpuSettings( _fbb, _inference_power_state, _inactive_power_configs, _inference_priority, _edgetpu_device_spec, _model_token, _float_truncation_type, _qos_class); } inline bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs) { return (lhs.device == rhs.device) && (lhs.performance == rhs.performance) && (lhs.usb_always_dfu == rhs.usb_always_dfu) && (lhs.usb_max_bulk_in_queue_length == rhs.usb_max_bulk_in_queue_length); } inline bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs) { return !(lhs == rhs); } inline CoralSettingsT *CoralSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new CoralSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void CoralSettings::UnPackTo(CoralSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; 
// Remainder of CoralSettings' generated object API (UnPackTo body, Pack,
// CreateCoralSettings), the complete CPUSettings object API (single
// num_threads field), and the start of TFLiteSettings' operator==, which
// compares each delegate-settings sub-object by pointer OR deep equality.
// NOTE(review): template argument lists appear stripped in this rendering
// (bare `flatbuffers::Offset`, `std::unique_ptr(new CPUSettingsT())`) —
// presumably extraction loss. Do not hand-edit; regenerate with flatc.
(void)_resolver; { auto _e = device(); if (_e) _o->device = _e->str(); } { auto _e = performance(); _o->performance = _e; } { auto _e = usb_always_dfu(); _o->usb_always_dfu = _e; } { auto _e = usb_max_bulk_in_queue_length(); _o->usb_max_bulk_in_queue_length = _e; } } inline flatbuffers::Offset CoralSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateCoralSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateCoralSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CoralSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _device = _o->device.empty() ? 0 : _fbb.CreateString(_o->device); auto _performance = _o->performance; auto _usb_always_dfu = _o->usb_always_dfu; auto _usb_max_bulk_in_queue_length = _o->usb_max_bulk_in_queue_length; return tflite::CreateCoralSettings( _fbb, _device, _performance, _usb_always_dfu, _usb_max_bulk_in_queue_length); } inline bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs) { return (lhs.num_threads == rhs.num_threads); } inline bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs) { return !(lhs == rhs); } inline CPUSettingsT *CPUSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new CPUSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void CPUSettings::UnPackTo(CPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = num_threads(); _o->num_threads = _e; } } inline flatbuffers::Offset CPUSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateCPUSettings(_fbb, 
_o, _rehasher); } inline flatbuffers::Offset CreateCPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CPUSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _num_threads = _o->num_threads; return tflite::CreateCPUSettings( _fbb, _num_threads); } inline bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) { return (lhs.delegate == rhs.delegate) && ((lhs.nnapi_settings == rhs.nnapi_settings) || (lhs.nnapi_settings && rhs.nnapi_settings && *lhs.nnapi_settings == *rhs.nnapi_settings)) && ((lhs.gpu_settings == rhs.gpu_settings) || (lhs.gpu_settings && rhs.gpu_settings && *lhs.gpu_settings == *rhs.gpu_settings)) && ((lhs.hexagon_settings == rhs.hexagon_settings) || (lhs.hexagon_settings && rhs.hexagon_settings && *lhs.hexagon_settings == *rhs.hexagon_settings)) && ((lhs.xnnpack_settings == rhs.xnnpack_settings) || (lhs.xnnpack_settings && rhs.xnnpack_settings && *lhs.xnnpack_settings == *rhs.xnnpack_settings)) && ((lhs.coreml_settings == rhs.coreml_settings) || (lhs.coreml_settings && rhs.coreml_settings && *lhs.coreml_settings == *rhs.coreml_settings)) && ((lhs.cpu_settings == rhs.cpu_settings) || (lhs.cpu_settings && rhs.cpu_settings && *lhs.cpu_settings == *rhs.cpu_settings)) && (lhs.max_delegated_partitions == rhs.max_delegated_partitions) && ((lhs.edgetpu_settings == rhs.edgetpu_settings) || (lhs.edgetpu_settings && rhs.edgetpu_settings && *lhs.edgetpu_settings == *rhs.edgetpu_settings)) && ((lhs.coral_settings == rhs.coral_settings) || (lhs.coral_settings && rhs.coral_settings && *lhs.coral_settings == *rhs.coral_settings)) && ((lhs.fallback_settings == rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings)) && (lhs.disable_default_delegates == 
// Generated object API for TFLiteSettings, the per-delegate configuration
// table aggregating one optional sub-settings object per accelerator
// (NNAPI/GPU/Hexagon/XNNPack/CoreML/CPU/EdgeTPU/Coral/Fallback):
//  - The copy constructor deep-copies every owned sub-settings object
//    (null stays null); scalars (delegate, max_delegated_partitions,
//    disable_default_delegates) are copied directly.
//  - operator= is copy-and-swap (by-value parameter, member-wise std::swap),
//    doubling as move assignment.
//  - UnPackTo reuses an existing sub-object when the destination already has
//    one (UnPackTo in place), otherwise allocates via the sub-table's UnPack.
//  - CreateTFLiteSettings emits offset 0 (absent field) for any null
//    sub-settings. The tail begins FallbackSettings' generated helpers.
// NOTE(review): template argument lists appear stripped in this rendering
// (`std::unique_ptr(new TFLiteSettingsT())`, bare `flatbuffers::Offset`) —
// presumably extraction loss. Do not hand-edit; regenerate with flatc.
rhs.disable_default_delegates); } inline bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) { return !(lhs == rhs); } inline TFLiteSettingsT::TFLiteSettingsT(const TFLiteSettingsT &o) : delegate(o.delegate), nnapi_settings((o.nnapi_settings) ? new tflite::NNAPISettingsT(*o.nnapi_settings) : nullptr), gpu_settings((o.gpu_settings) ? new tflite::GPUSettingsT(*o.gpu_settings) : nullptr), hexagon_settings((o.hexagon_settings) ? new tflite::HexagonSettingsT(*o.hexagon_settings) : nullptr), xnnpack_settings((o.xnnpack_settings) ? new tflite::XNNPackSettingsT(*o.xnnpack_settings) : nullptr), coreml_settings((o.coreml_settings) ? new tflite::CoreMLSettingsT(*o.coreml_settings) : nullptr), cpu_settings((o.cpu_settings) ? new tflite::CPUSettingsT(*o.cpu_settings) : nullptr), max_delegated_partitions(o.max_delegated_partitions), edgetpu_settings((o.edgetpu_settings) ? new tflite::EdgeTpuSettingsT(*o.edgetpu_settings) : nullptr), coral_settings((o.coral_settings) ? new tflite::CoralSettingsT(*o.coral_settings) : nullptr), fallback_settings((o.fallback_settings) ? 
new tflite::FallbackSettingsT(*o.fallback_settings) : nullptr), disable_default_delegates(o.disable_default_delegates) { } inline TFLiteSettingsT &TFLiteSettingsT::operator=(TFLiteSettingsT o) FLATBUFFERS_NOEXCEPT { std::swap(delegate, o.delegate); std::swap(nnapi_settings, o.nnapi_settings); std::swap(gpu_settings, o.gpu_settings); std::swap(hexagon_settings, o.hexagon_settings); std::swap(xnnpack_settings, o.xnnpack_settings); std::swap(coreml_settings, o.coreml_settings); std::swap(cpu_settings, o.cpu_settings); std::swap(max_delegated_partitions, o.max_delegated_partitions); std::swap(edgetpu_settings, o.edgetpu_settings); std::swap(coral_settings, o.coral_settings); std::swap(fallback_settings, o.fallback_settings); std::swap(disable_default_delegates, o.disable_default_delegates); return *this; } inline TFLiteSettingsT *TFLiteSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new TFLiteSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void TFLiteSettings::UnPackTo(TFLiteSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = delegate(); _o->delegate = _e; } { auto _e = nnapi_settings(); if (_e) { if(_o->nnapi_settings) { _e->UnPackTo(_o->nnapi_settings.get(), _resolver); } else { _o->nnapi_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = gpu_settings(); if (_e) { if(_o->gpu_settings) { _e->UnPackTo(_o->gpu_settings.get(), _resolver); } else { _o->gpu_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = hexagon_settings(); if (_e) { if(_o->hexagon_settings) { _e->UnPackTo(_o->hexagon_settings.get(), _resolver); } else { _o->hexagon_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = xnnpack_settings(); if (_e) { if(_o->xnnpack_settings) { _e->UnPackTo(_o->xnnpack_settings.get(), _resolver); } else { _o->xnnpack_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e 
= coreml_settings(); if (_e) { if(_o->coreml_settings) { _e->UnPackTo(_o->coreml_settings.get(), _resolver); } else { _o->coreml_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = cpu_settings(); if (_e) { if(_o->cpu_settings) { _e->UnPackTo(_o->cpu_settings.get(), _resolver); } else { _o->cpu_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = max_delegated_partitions(); _o->max_delegated_partitions = _e; } { auto _e = edgetpu_settings(); if (_e) { if(_o->edgetpu_settings) { _e->UnPackTo(_o->edgetpu_settings.get(), _resolver); } else { _o->edgetpu_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = coral_settings(); if (_e) { if(_o->coral_settings) { _e->UnPackTo(_o->coral_settings.get(), _resolver); } else { _o->coral_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = fallback_settings(); if (_e) { if(_o->fallback_settings) { _e->UnPackTo(_o->fallback_settings.get(), _resolver); } else { _o->fallback_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = disable_default_delegates(); _o->disable_default_delegates = _e; } } inline flatbuffers::Offset TFLiteSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateTFLiteSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateTFLiteSettings(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TFLiteSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _delegate = _o->delegate; auto _nnapi_settings = _o->nnapi_settings ? CreateNNAPISettings(_fbb, _o->nnapi_settings.get(), _rehasher) : 0; auto _gpu_settings = _o->gpu_settings ? 
CreateGPUSettings(_fbb, _o->gpu_settings.get(), _rehasher) : 0; auto _hexagon_settings = _o->hexagon_settings ? CreateHexagonSettings(_fbb, _o->hexagon_settings.get(), _rehasher) : 0; auto _xnnpack_settings = _o->xnnpack_settings ? CreateXNNPackSettings(_fbb, _o->xnnpack_settings.get(), _rehasher) : 0; auto _coreml_settings = _o->coreml_settings ? CreateCoreMLSettings(_fbb, _o->coreml_settings.get(), _rehasher) : 0; auto _cpu_settings = _o->cpu_settings ? CreateCPUSettings(_fbb, _o->cpu_settings.get(), _rehasher) : 0; auto _max_delegated_partitions = _o->max_delegated_partitions; auto _edgetpu_settings = _o->edgetpu_settings ? CreateEdgeTpuSettings(_fbb, _o->edgetpu_settings.get(), _rehasher) : 0; auto _coral_settings = _o->coral_settings ? CreateCoralSettings(_fbb, _o->coral_settings.get(), _rehasher) : 0; auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0; auto _disable_default_delegates = _o->disable_default_delegates; return tflite::CreateTFLiteSettings( _fbb, _delegate, _nnapi_settings, _gpu_settings, _hexagon_settings, _xnnpack_settings, _coreml_settings, _cpu_settings, _max_delegated_partitions, _edgetpu_settings, _coral_settings, _fallback_settings, _disable_default_delegates); } inline bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) { return (lhs.allow_automatic_fallback_on_compilation_error == rhs.allow_automatic_fallback_on_compilation_error) && (lhs.allow_automatic_fallback_on_execution_error == rhs.allow_automatic_fallback_on_execution_error); } inline bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) { return !(lhs == rhs); } inline FallbackSettingsT *FallbackSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new FallbackSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void FallbackSettings::UnPackTo(FallbackSettingsT *_o, const 
// Remainder of FallbackSettings' generated object API (UnPackTo body, Pack,
// CreateFallbackSettings — two scalar flags), the complete BenchmarkMetric
// object API (a name string plus a vector of scalar values, both omitted from
// the buffer when empty), and the start of BenchmarkResult: operator== and a
// deep-copying copy constructor that clones each metrics element.
// NOTE(review): template argument lists appear stripped in this rendering
// (bare `flatbuffers::Offset`, `std::unique_ptr(new BenchmarkMetricT())`) —
// presumably extraction loss. Do not hand-edit; regenerate with flatc.
flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = allow_automatic_fallback_on_compilation_error(); _o->allow_automatic_fallback_on_compilation_error = _e; } { auto _e = allow_automatic_fallback_on_execution_error(); _o->allow_automatic_fallback_on_execution_error = _e; } } inline flatbuffers::Offset FallbackSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateFallbackSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateFallbackSettings(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FallbackSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _allow_automatic_fallback_on_compilation_error = _o->allow_automatic_fallback_on_compilation_error; auto _allow_automatic_fallback_on_execution_error = _o->allow_automatic_fallback_on_execution_error; return tflite::CreateFallbackSettings( _fbb, _allow_automatic_fallback_on_compilation_error, _allow_automatic_fallback_on_execution_error); } inline bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) { return (lhs.name == rhs.name) && (lhs.values == rhs.values); } inline bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) { return !(lhs == rhs); } inline BenchmarkMetricT *BenchmarkMetric::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BenchmarkMetricT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BenchmarkMetric::UnPackTo(BenchmarkMetricT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = name(); if (_e) _o->name = _e->str(); } { auto _e = values(); if (_e) { _o->values.resize(_e->size()); 
for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } } inline flatbuffers::Offset BenchmarkMetric::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBenchmarkMetric(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBenchmarkMetric(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkMetricT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; return tflite::CreateBenchmarkMetric( _fbb, _name, _values); } inline bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) { return (lhs.initialization_time_us == rhs.initialization_time_us) && (lhs.inference_time_us == rhs.inference_time_us) && (lhs.max_memory_kb == rhs.max_memory_kb) && (lhs.ok == rhs.ok) && (lhs.metrics == rhs.metrics); } inline bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) { return !(lhs == rhs); } inline BenchmarkResultT::BenchmarkResultT(const BenchmarkResultT &o) : initialization_time_us(o.initialization_time_us), inference_time_us(o.inference_time_us), max_memory_kb(o.max_memory_kb), ok(o.ok) { metrics.reserve(o.metrics.size()); for (const auto &v : o.metrics) { metrics.emplace_back((v) ? 
// Remainder of BenchmarkResult's generated object API: copy-and-swap
// operator=, UnPack/UnPackTo (element-wise copy of the two timing vectors;
// metrics sub-tables reused in place when present, otherwise allocated via
// UnPack), and CreateBenchmarkResult, which serializes the metrics vector
// through a _VectorArgs trampoline lambda and emits offset 0 for empty
// vectors. Also the complete ErrorCode object API (three scalar fields) up
// to the start of CreateErrorCode, which continues past this region.
// NOTE(review): template argument lists appear stripped in this rendering
// (`_fbb.CreateVector> (`, `std::unique_ptr(new ErrorCodeT())`) —
// presumably extraction loss. Do not hand-edit; regenerate with flatc.
new tflite::BenchmarkMetricT(*v) : nullptr); } } inline BenchmarkResultT &BenchmarkResultT::operator=(BenchmarkResultT o) FLATBUFFERS_NOEXCEPT { std::swap(initialization_time_us, o.initialization_time_us); std::swap(inference_time_us, o.inference_time_us); std::swap(max_memory_kb, o.max_memory_kb); std::swap(ok, o.ok); std::swap(metrics, o.metrics); return *this; } inline BenchmarkResultT *BenchmarkResult::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BenchmarkResultT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BenchmarkResult::UnPackTo(BenchmarkResultT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = initialization_time_us(); if (_e) { _o->initialization_time_us.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->initialization_time_us[_i] = _e->Get(_i); } } } { auto _e = inference_time_us(); if (_e) { _o->inference_time_us.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inference_time_us[_i] = _e->Get(_i); } } } { auto _e = max_memory_kb(); _o->max_memory_kb = _e; } { auto _e = ok(); _o->ok = _e; } { auto _e = metrics(); if (_e) { _o->metrics.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metrics[_i]) { _e->Get(_i)->UnPackTo(_o->metrics[_i].get(), _resolver); } else { _o->metrics[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } } inline flatbuffers::Offset BenchmarkResult::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBenchmarkResult(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBenchmarkResult(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const 
BenchmarkResultT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _initialization_time_us = _o->initialization_time_us.size() ? _fbb.CreateVector(_o->initialization_time_us) : 0; auto _inference_time_us = _o->inference_time_us.size() ? _fbb.CreateVector(_o->inference_time_us) : 0; auto _max_memory_kb = _o->max_memory_kb; auto _ok = _o->ok; auto _metrics = _o->metrics.size() ? _fbb.CreateVector> (_o->metrics.size(), [](size_t i, _VectorArgs *__va) { return CreateBenchmarkMetric(*__va->__fbb, __va->__o->metrics[i].get(), __va->__rehasher); }, &_va ) : 0; return tflite::CreateBenchmarkResult( _fbb, _initialization_time_us, _inference_time_us, _max_memory_kb, _ok, _metrics); } inline bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs) { return (lhs.source == rhs.source) && (lhs.tflite_error == rhs.tflite_error) && (lhs.underlying_api_error == rhs.underlying_api_error); } inline bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs) { return !(lhs == rhs); } inline ErrorCodeT *ErrorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new ErrorCodeT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void ErrorCode::UnPackTo(ErrorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = source(); _o->source = _e; } { auto _e = tflite_error(); _o->tflite_error = _e; } { auto _e = underlying_api_error(); _o->underlying_api_error = _e; } } inline flatbuffers::Offset ErrorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateErrorCode(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateErrorCode(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const 
ErrorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _source = _o->source; auto _tflite_error = _o->tflite_error; auto _underlying_api_error = _o->underlying_api_error; return tflite::CreateErrorCode( _fbb, _source, _tflite_error, _underlying_api_error); } inline bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) { return (lhs.stage == rhs.stage) && (lhs.exit_code == rhs.exit_code) && (lhs.signal == rhs.signal) && (lhs.error_code == rhs.error_code) && (lhs.mini_benchmark_error_code == rhs.mini_benchmark_error_code); } inline bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) { return !(lhs == rhs); } inline BenchmarkErrorT::BenchmarkErrorT(const BenchmarkErrorT &o) : stage(o.stage), exit_code(o.exit_code), signal(o.signal), mini_benchmark_error_code(o.mini_benchmark_error_code) { error_code.reserve(o.error_code.size()); for (const auto &v : o.error_code) { error_code.emplace_back((v) ? 
new tflite::ErrorCodeT(*v) : nullptr); } } inline BenchmarkErrorT &BenchmarkErrorT::operator=(BenchmarkErrorT o) FLATBUFFERS_NOEXCEPT { std::swap(stage, o.stage); std::swap(exit_code, o.exit_code); std::swap(signal, o.signal); std::swap(error_code, o.error_code); std::swap(mini_benchmark_error_code, o.mini_benchmark_error_code); return *this; } inline BenchmarkErrorT *BenchmarkError::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BenchmarkErrorT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BenchmarkError::UnPackTo(BenchmarkErrorT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = stage(); _o->stage = _e; } { auto _e = exit_code(); _o->exit_code = _e; } { auto _e = signal(); _o->signal = _e; } { auto _e = error_code(); if (_e) { _o->error_code.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->error_code[_i]) { _e->Get(_i)->UnPackTo(_o->error_code[_i].get(), _resolver); } else { _o->error_code[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } { auto _e = mini_benchmark_error_code(); _o->mini_benchmark_error_code = _e; } } inline flatbuffers::Offset BenchmarkError::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBenchmarkError(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBenchmarkError(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkErrorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _stage = _o->stage; auto _exit_code = _o->exit_code; auto _signal = _o->signal; auto _error_code = _o->error_code.size() ? 
_fbb.CreateVector> (_o->error_code.size(), [](size_t i, _VectorArgs *__va) { return CreateErrorCode(*__va->__fbb, __va->__o->error_code[i].get(), __va->__rehasher); }, &_va ) : 0; auto _mini_benchmark_error_code = _o->mini_benchmark_error_code; return tflite::CreateBenchmarkError( _fbb, _stage, _exit_code, _signal, _error_code, _mini_benchmark_error_code); } inline bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) { return ((lhs.tflite_settings == rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings)) && (lhs.event_type == rhs.event_type) && ((lhs.result == rhs.result) || (lhs.result && rhs.result && *lhs.result == *rhs.result)) && ((lhs.error == rhs.error) || (lhs.error && rhs.error && *lhs.error == *rhs.error)) && (lhs.boottime_us == rhs.boottime_us) && (lhs.wallclock_us == rhs.wallclock_us); } inline bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) { return !(lhs == rhs); } inline BenchmarkEventT::BenchmarkEventT(const BenchmarkEventT &o) : tflite_settings((o.tflite_settings) ? new tflite::TFLiteSettingsT(*o.tflite_settings) : nullptr), event_type(o.event_type), result((o.result) ? new tflite::BenchmarkResultT(*o.result) : nullptr), error((o.error) ? 
new tflite::BenchmarkErrorT(*o.error) : nullptr), boottime_us(o.boottime_us), wallclock_us(o.wallclock_us) { } inline BenchmarkEventT &BenchmarkEventT::operator=(BenchmarkEventT o) FLATBUFFERS_NOEXCEPT { std::swap(tflite_settings, o.tflite_settings); std::swap(event_type, o.event_type); std::swap(result, o.result); std::swap(error, o.error); std::swap(boottime_us, o.boottime_us); std::swap(wallclock_us, o.wallclock_us); return *this; } inline BenchmarkEventT *BenchmarkEvent::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BenchmarkEventT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BenchmarkEvent::UnPackTo(BenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = tflite_settings(); if (_e) { if(_o->tflite_settings) { _e->UnPackTo(_o->tflite_settings.get(), _resolver); } else { _o->tflite_settings = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = event_type(); _o->event_type = _e; } { auto _e = result(); if (_e) { if(_o->result) { _e->UnPackTo(_o->result.get(), _resolver); } else { _o->result = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = error(); if (_e) { if(_o->error) { _e->UnPackTo(_o->error.get(), _resolver); } else { _o->error = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = boottime_us(); _o->boottime_us = _e; } { auto _e = wallclock_us(); _o->wallclock_us = _e; } } inline flatbuffers::Offset BenchmarkEvent::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBenchmarkEvent(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkEventT* __o; const flatbuffers::rehasher_function_t 
*__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0; auto _event_type = _o->event_type; auto _result = _o->result ? CreateBenchmarkResult(_fbb, _o->result.get(), _rehasher) : 0; auto _error = _o->error ? CreateBenchmarkError(_fbb, _o->error.get(), _rehasher) : 0; auto _boottime_us = _o->boottime_us; auto _wallclock_us = _o->wallclock_us; return tflite::CreateBenchmarkEvent( _fbb, _tflite_settings, _event_type, _result, _error, _boottime_us, _wallclock_us); } inline bool operator==(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs) { return (lhs.number_of_source_events == rhs.number_of_source_events) && ((lhs.min_latency_event == rhs.min_latency_event) || (lhs.min_latency_event && rhs.min_latency_event && *lhs.min_latency_event == *rhs.min_latency_event)) && (lhs.min_inference_time_us == rhs.min_inference_time_us); } inline bool operator!=(const BestAccelerationDecisionT &lhs, const BestAccelerationDecisionT &rhs) { return !(lhs == rhs); } inline BestAccelerationDecisionT::BestAccelerationDecisionT(const BestAccelerationDecisionT &o) : number_of_source_events(o.number_of_source_events), min_latency_event((o.min_latency_event) ? 
new tflite::BenchmarkEventT(*o.min_latency_event) : nullptr), min_inference_time_us(o.min_inference_time_us) { } inline BestAccelerationDecisionT &BestAccelerationDecisionT::operator=(BestAccelerationDecisionT o) FLATBUFFERS_NOEXCEPT { std::swap(number_of_source_events, o.number_of_source_events); std::swap(min_latency_event, o.min_latency_event); std::swap(min_inference_time_us, o.min_inference_time_us); return *this; } inline BestAccelerationDecisionT *BestAccelerationDecision::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BestAccelerationDecisionT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BestAccelerationDecision::UnPackTo(BestAccelerationDecisionT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = number_of_source_events(); _o->number_of_source_events = _e; } { auto _e = min_latency_event(); if (_e) { if(_o->min_latency_event) { _e->UnPackTo(_o->min_latency_event.get(), _resolver); } else { _o->min_latency_event = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = min_inference_time_us(); _o->min_inference_time_us = _e; } } inline flatbuffers::Offset BestAccelerationDecision::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBestAccelerationDecision(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBestAccelerationDecision(flatbuffers::FlatBufferBuilder &_fbb, const BestAccelerationDecisionT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BestAccelerationDecisionT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _number_of_source_events = _o->number_of_source_events; auto _min_latency_event = _o->min_latency_event ? 
CreateBenchmarkEvent(_fbb, _o->min_latency_event.get(), _rehasher) : 0; auto _min_inference_time_us = _o->min_inference_time_us; return tflite::CreateBestAccelerationDecision( _fbb, _number_of_source_events, _min_latency_event, _min_inference_time_us); } inline bool operator==(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs) { return (lhs.initialization_status == rhs.initialization_status); } inline bool operator!=(const BenchmarkInitializationFailureT &lhs, const BenchmarkInitializationFailureT &rhs) { return !(lhs == rhs); } inline BenchmarkInitializationFailureT *BenchmarkInitializationFailure::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BenchmarkInitializationFailureT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BenchmarkInitializationFailure::UnPackTo(BenchmarkInitializationFailureT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = initialization_status(); _o->initialization_status = _e; } } inline flatbuffers::Offset BenchmarkInitializationFailure::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBenchmarkInitializationFailure(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBenchmarkInitializationFailure(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkInitializationFailureT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkInitializationFailureT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _initialization_status = _o->initialization_status; return tflite::CreateBenchmarkInitializationFailure( _fbb, _initialization_status); } inline bool operator==(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs) 
{ return (lhs.is_log_flushing_event == rhs.is_log_flushing_event) && ((lhs.best_acceleration_decision == rhs.best_acceleration_decision) || (lhs.best_acceleration_decision && rhs.best_acceleration_decision && *lhs.best_acceleration_decision == *rhs.best_acceleration_decision)) && ((lhs.initialization_failure == rhs.initialization_failure) || (lhs.initialization_failure && rhs.initialization_failure && *lhs.initialization_failure == *rhs.initialization_failure)) && ((lhs.benchmark_event == rhs.benchmark_event) || (lhs.benchmark_event && rhs.benchmark_event && *lhs.benchmark_event == *rhs.benchmark_event)); } inline bool operator!=(const MiniBenchmarkEventT &lhs, const MiniBenchmarkEventT &rhs) { return !(lhs == rhs); } inline MiniBenchmarkEventT::MiniBenchmarkEventT(const MiniBenchmarkEventT &o) : is_log_flushing_event(o.is_log_flushing_event), best_acceleration_decision((o.best_acceleration_decision) ? new tflite::BestAccelerationDecisionT(*o.best_acceleration_decision) : nullptr), initialization_failure((o.initialization_failure) ? new tflite::BenchmarkInitializationFailureT(*o.initialization_failure) : nullptr), benchmark_event((o.benchmark_event) ? 
new tflite::BenchmarkEventT(*o.benchmark_event) : nullptr) { } inline MiniBenchmarkEventT &MiniBenchmarkEventT::operator=(MiniBenchmarkEventT o) FLATBUFFERS_NOEXCEPT { std::swap(is_log_flushing_event, o.is_log_flushing_event); std::swap(best_acceleration_decision, o.best_acceleration_decision); std::swap(initialization_failure, o.initialization_failure); std::swap(benchmark_event, o.benchmark_event); return *this; } inline MiniBenchmarkEventT *MiniBenchmarkEvent::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new MiniBenchmarkEventT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void MiniBenchmarkEvent::UnPackTo(MiniBenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = is_log_flushing_event(); _o->is_log_flushing_event = _e; } { auto _e = best_acceleration_decision(); if (_e) { if(_o->best_acceleration_decision) { _e->UnPackTo(_o->best_acceleration_decision.get(), _resolver); } else { _o->best_acceleration_decision = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = initialization_failure(); if (_e) { if(_o->initialization_failure) { _e->UnPackTo(_o->initialization_failure.get(), _resolver); } else { _o->initialization_failure = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = benchmark_event(); if (_e) { if(_o->benchmark_event) { _e->UnPackTo(_o->benchmark_event.get(), _resolver); } else { _o->benchmark_event = std::unique_ptr(_e->UnPack(_resolver)); } } } } inline flatbuffers::Offset MiniBenchmarkEvent::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateMiniBenchmarkEvent(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateMiniBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const MiniBenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const MiniBenchmarkEventT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _is_log_flushing_event = _o->is_log_flushing_event; auto _best_acceleration_decision = _o->best_acceleration_decision ? CreateBestAccelerationDecision(_fbb, _o->best_acceleration_decision.get(), _rehasher) : 0; auto _initialization_failure = _o->initialization_failure ? CreateBenchmarkInitializationFailure(_fbb, _o->initialization_failure.get(), _rehasher) : 0; auto _benchmark_event = _o->benchmark_event ? CreateBenchmarkEvent(_fbb, _o->benchmark_event.get(), _rehasher) : 0; return tflite::CreateMiniBenchmarkEvent( _fbb, _is_log_flushing_event, _best_acceleration_decision, _initialization_failure, _benchmark_event); } inline bool operator==(const ModelFileT &lhs, const ModelFileT &rhs) { return (lhs.filename == rhs.filename) && (lhs.fd == rhs.fd) && (lhs.offset == rhs.offset) && (lhs.length == rhs.length); } inline bool operator!=(const ModelFileT &lhs, const ModelFileT &rhs) { return !(lhs == rhs); } inline ModelFileT *ModelFile::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new ModelFileT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void ModelFile::UnPackTo(ModelFileT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = filename(); if (_e) _o->filename = _e->str(); } { auto _e = fd(); _o->fd = _e; } { auto _e = offset(); _o->offset = _e; } { auto _e = length(); _o->length = _e; } } inline flatbuffers::Offset ModelFile::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateModelFile(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateModelFile(flatbuffers::FlatBufferBuilder &_fbb, const ModelFileT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const ModelFileT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _filename = _o->filename.empty() ? 0 : _fbb.CreateString(_o->filename); auto _fd = _o->fd; auto _offset = _o->offset; auto _length = _o->length; return tflite::CreateModelFile( _fbb, _filename, _fd, _offset, _length); } inline bool operator==(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs) { return (lhs.storage_file_path == rhs.storage_file_path) && (lhs.data_directory_path == rhs.data_directory_path); } inline bool operator!=(const BenchmarkStoragePathsT &lhs, const BenchmarkStoragePathsT &rhs) { return !(lhs == rhs); } inline BenchmarkStoragePathsT *BenchmarkStoragePaths::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new BenchmarkStoragePathsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void BenchmarkStoragePaths::UnPackTo(BenchmarkStoragePathsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = storage_file_path(); if (_e) _o->storage_file_path = _e->str(); } { auto _e = data_directory_path(); if (_e) _o->data_directory_path = _e->str(); } } inline flatbuffers::Offset BenchmarkStoragePaths::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateBenchmarkStoragePaths(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateBenchmarkStoragePaths(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkStoragePathsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkStoragePathsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; auto _storage_file_path = _o->storage_file_path.empty() ? 
0 : _fbb.CreateString(_o->storage_file_path); auto _data_directory_path = _o->data_directory_path.empty() ? 0 : _fbb.CreateString(_o->data_directory_path); return tflite::CreateBenchmarkStoragePaths( _fbb, _storage_file_path, _data_directory_path); } inline bool operator==(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs) { return (lhs.settings_to_test == rhs.settings_to_test) && ((lhs.model_file == rhs.model_file) || (lhs.model_file && rhs.model_file && *lhs.model_file == *rhs.model_file)) && ((lhs.storage_paths == rhs.storage_paths) || (lhs.storage_paths && rhs.storage_paths && *lhs.storage_paths == *rhs.storage_paths)); } inline bool operator!=(const MinibenchmarkSettingsT &lhs, const MinibenchmarkSettingsT &rhs) { return !(lhs == rhs); } inline MinibenchmarkSettingsT::MinibenchmarkSettingsT(const MinibenchmarkSettingsT &o) : model_file((o.model_file) ? new tflite::ModelFileT(*o.model_file) : nullptr), storage_paths((o.storage_paths) ? new tflite::BenchmarkStoragePathsT(*o.storage_paths) : nullptr) { settings_to_test.reserve(o.settings_to_test.size()); for (const auto &v : o.settings_to_test) { settings_to_test.emplace_back((v) ? 
new tflite::TFLiteSettingsT(*v) : nullptr); } } inline MinibenchmarkSettingsT &MinibenchmarkSettingsT::operator=(MinibenchmarkSettingsT o) FLATBUFFERS_NOEXCEPT { std::swap(settings_to_test, o.settings_to_test); std::swap(model_file, o.model_file); std::swap(storage_paths, o.storage_paths); return *this; } inline MinibenchmarkSettingsT *MinibenchmarkSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const { auto _o = std::unique_ptr(new MinibenchmarkSettingsT()); UnPackTo(_o.get(), _resolver); return _o.release(); } inline void MinibenchmarkSettings::UnPackTo(MinibenchmarkSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const { (void)_o; (void)_resolver; { auto _e = settings_to_test(); if (_e) { _o->settings_to_test.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->settings_to_test[_i]) { _e->Get(_i)->UnPackTo(_o->settings_to_test[_i].get(), _resolver); } else { _o->settings_to_test[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } { auto _e = model_file(); if (_e) { if(_o->model_file) { _e->UnPackTo(_o->model_file.get(), _resolver); } else { _o->model_file = std::unique_ptr(_e->UnPack(_resolver)); } } } { auto _e = storage_paths(); if (_e) { if(_o->storage_paths) { _e->UnPackTo(_o->storage_paths.get(), _resolver); } else { _o->storage_paths = std::unique_ptr(_e->UnPack(_resolver)); } } } } inline flatbuffers::Offset MinibenchmarkSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { return CreateMinibenchmarkSettings(_fbb, _o, _rehasher); } inline flatbuffers::Offset CreateMinibenchmarkSettings(flatbuffers::FlatBufferBuilder &_fbb, const MinibenchmarkSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { (void)_rehasher; (void)_o; struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MinibenchmarkSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; auto _settings_to_test = _o->settings_to_test.size() ? _fbb.CreateVector> (_o->settings_to_test.size(), [](size_t i, _VectorArgs *__va) { return CreateTFLiteSettings(*__va->__fbb, __va->__o->settings_to_test[i].get(), __va->__rehasher); }, &_va ) : 0; auto _model_file = _o->model_file ? CreateModelFile(_fbb, _o->model_file.get(), _rehasher) : 0; auto _storage_paths = _o->storage_paths ? CreateBenchmarkStoragePaths(_fbb, _o->storage_paths.get(), _rehasher) : 0; return tflite::CreateMinibenchmarkSettings( _fbb, _settings_to_test, _model_file, _storage_paths); } } // namespace tflite #endif // FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_