/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/configuration/flatbuffer_to_proto.h"

#include <cstdint>
#include <limits>
#include <memory>
#include <string>
#include <utility>

#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"

namespace tflite {
namespace acceleration {
namespace {

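// Test fixture for flatbuffer -> proto conversion. Each Check* helper builds a
// minimal ComputeSettingsT with the given flatbuffer enum value, runs
// ConvertFromFlatbuffer(), and verifies that the corresponding proto field
// carries the expected proto enum value.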
class ConversionTest : public ::testing::Test {
 protected:
  void CheckDelegateEnum(Delegate input, proto::Delegate output) {
    settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
    settings_.tflite_settings->delegate = input;
    const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
    EXPECT_EQ(output, compute.tflite_settings().delegate());
  }
  void CheckExecutionPreference(ExecutionPreference input,
                                proto::ExecutionPreference output) {
    settings_.preference = input;
    const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
    EXPECT_EQ(output, compute.preference());
  }
  void CheckNNAPIExecutionPreference(NNAPIExecutionPreference input,
                                     proto::NNAPIExecutionPreference output) {
    settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
    settings_.tflite_settings->nnapi_settings =
        std::make_unique<NNAPISettingsT>();
    settings_.tflite_settings->nnapi_settings->execution_preference = input;
    const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
    EXPECT_EQ(
        output,
        compute.tflite_settings().nnapi_settings().execution_preference());
  }
  void CheckNNAPIExecutionPriority(NNAPIExecutionPriority input,
                                   proto::NNAPIExecutionPriority output) {
    settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
    settings_.tflite_settings->nnapi_settings =
        std::make_unique<NNAPISettingsT>();
    settings_.tflite_settings->nnapi_settings->execution_priority = input;
    const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
    EXPECT_EQ(output,
              compute.tflite_settings().nnapi_settings().execution_priority());
  }
  void CheckGPUBackend(GPUBackend input, proto::GPUBackend output) {
    settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
    settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
    settings_.tflite_settings->gpu_settings->force_backend = input;
    const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
    EXPECT_EQ(output, compute.tflite_settings().gpu_settings().force_backend());
  }

  ComputeSettingsT settings_;
  MiniBenchmarkEventT event_;
};

TEST_F(ConversionTest, Delegate) {
  CheckDelegateEnum(Delegate_NONE, proto::Delegate::NONE);
  CheckDelegateEnum(Delegate_NNAPI, proto::Delegate::NNAPI);
  CheckDelegateEnum(Delegate_GPU, proto::Delegate::GPU);
  CheckDelegateEnum(Delegate_HEXAGON, proto::Delegate::HEXAGON);
  CheckDelegateEnum(Delegate_EDGETPU, proto::Delegate::EDGETPU);
  CheckDelegateEnum(Delegate_EDGETPU_CORAL, proto::Delegate::EDGETPU_CORAL);
  CheckDelegateEnum(Delegate_XNNPACK, proto::Delegate::XNNPACK);
  CheckDelegateEnum(Delegate_CORE_ML, proto::Delegate::CORE_ML);
}

TEST_F(ConversionTest, ExecutionPreference) {
  CheckExecutionPreference(ExecutionPreference_ANY,
                           proto::ExecutionPreference::ANY);
  CheckExecutionPreference(ExecutionPreference_LOW_LATENCY,
                           proto::ExecutionPreference::LOW_LATENCY);
  CheckExecutionPreference(ExecutionPreference_LOW_POWER,
                           proto::ExecutionPreference::LOW_POWER);
  CheckExecutionPreference(ExecutionPreference_FORCE_CPU,
                           proto::ExecutionPreference::FORCE_CPU);
}

TEST_F(ConversionTest, ModelIdentifier) {
  settings_.model_identifier_for_statistics = "id";
  settings_.model_namespace_for_statistics = "ns";
  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(compute.model_namespace_for_statistics(), "ns");
  EXPECT_EQ(compute.model_identifier_for_statistics(), "id");
}

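// Covers the scalar NNAPI fields and checks that the fallback flags stay false
// until a FallbackSettingsT is explicitly attached to the input.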
TEST_F(ConversionTest, NNAPISettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* input_settings =
      settings_.tflite_settings->nnapi_settings.get();
  input_settings->accelerator_name = "a";
  input_settings->cache_directory = "d";
  input_settings->model_token = "t";
  input_settings->allow_fp16_precision_for_fp32 = true;

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::NNAPISettings output_settings =
      compute.tflite_settings().nnapi_settings();
  EXPECT_EQ(output_settings.accelerator_name(), "a");
  EXPECT_EQ(output_settings.cache_directory(), "d");
  EXPECT_EQ(output_settings.model_token(), "t");
  EXPECT_TRUE(output_settings.allow_fp16_precision_for_fp32());
  EXPECT_FALSE(output_settings.allow_nnapi_cpu_on_android_10_plus());
  EXPECT_FALSE(output_settings.fallback_settings()
                   .allow_automatic_fallback_on_compilation_error());
  EXPECT_FALSE(output_settings.fallback_settings()
                   .allow_automatic_fallback_on_execution_error());

  input_settings->fallback_settings = std::make_unique<FallbackSettingsT>();
  input_settings->fallback_settings
      ->allow_automatic_fallback_on_compilation_error = true;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().nnapi_settings();
  EXPECT_TRUE(output_settings.fallback_settings()
                  .allow_automatic_fallback_on_compilation_error());
  EXPECT_FALSE(output_settings.fallback_settings()
                   .allow_automatic_fallback_on_execution_error());

  input_settings->fallback_settings
      ->allow_automatic_fallback_on_compilation_error = false;
  input_settings->fallback_settings
      ->allow_automatic_fallback_on_execution_error = true;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().nnapi_settings();
  EXPECT_FALSE(output_settings.fallback_settings()
                   .allow_automatic_fallback_on_compilation_error());
  EXPECT_TRUE(output_settings.fallback_settings()
                  .allow_automatic_fallback_on_execution_error());

  input_settings->allow_fp16_precision_for_fp32 = false;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().nnapi_settings();
  EXPECT_FALSE(output_settings.allow_fp16_precision_for_fp32());
}

TEST_F(ConversionTest, NNAPIAllowDynamicDimensions) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* input_settings =
      settings_.tflite_settings->nnapi_settings.get();

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::NNAPISettings output_settings =
      compute.tflite_settings().nnapi_settings();
  EXPECT_FALSE(output_settings.allow_dynamic_dimensions());

  input_settings->allow_dynamic_dimensions = true;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().nnapi_settings();
  EXPECT_TRUE(output_settings.allow_dynamic_dimensions());
}

TEST_F(ConversionTest, NNAPIBurstComputation) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* input_settings =
      settings_.tflite_settings->nnapi_settings.get();

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::NNAPISettings output_settings =
      compute.tflite_settings().nnapi_settings();
  EXPECT_FALSE(output_settings.use_burst_computation());

  input_settings->use_burst_computation = true;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().nnapi_settings();
  EXPECT_TRUE(output_settings.use_burst_computation());
}

TEST_F(ConversionTest, NNAPIExecutionPreference) {
  CheckNNAPIExecutionPreference(
      NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
      proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER);
  CheckNNAPIExecutionPreference(
      NNAPIExecutionPreference_NNAPI_LOW_POWER,
      proto::NNAPIExecutionPreference::NNAPI_LOW_POWER);
  CheckNNAPIExecutionPreference(
      NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED,
      proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED);
  CheckNNAPIExecutionPreference(NNAPIExecutionPreference_UNDEFINED,
                                proto::NNAPIExecutionPreference::UNDEFINED);
}

TEST_F(ConversionTest, NNAPIExecutionPriority) {
  CheckNNAPIExecutionPriority(
      NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
      proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW);
  CheckNNAPIExecutionPriority(
      NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
      proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM);
  CheckNNAPIExecutionPriority(
      NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH,
      proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH);
  CheckNNAPIExecutionPriority(
      NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
      proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED);
}

TEST_F(ConversionTest, NNAPISupportLibraryHandle) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* input_settings =
      settings_.tflite_settings->nnapi_settings.get();

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::NNAPISettings output_settings =
      compute.tflite_settings().nnapi_settings();
  EXPECT_EQ(output_settings.support_library_handle(), 0);

  input_settings->support_library_handle = std::numeric_limits<int64_t>::max();
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().nnapi_settings();
  EXPECT_EQ(output_settings.support_library_handle(),
            std::numeric_limits<int64_t>::max());
}

TEST_F(ConversionTest, GPUSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
  GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();

  input_settings->is_precision_loss_allowed = true;
  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
  EXPECT_TRUE(output_settings.is_precision_loss_allowed());

  input_settings->is_precision_loss_allowed = false;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().gpu_settings();
  EXPECT_FALSE(output_settings.is_precision_loss_allowed());

  EXPECT_TRUE(output_settings.enable_quantized_inference());
  input_settings->enable_quantized_inference = false;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().gpu_settings();
  EXPECT_FALSE(output_settings.enable_quantized_inference());
}

TEST_F(ConversionTest, GPUBackend) {
  CheckGPUBackend(GPUBackend_UNSET, proto::GPUBackend::UNSET);
  CheckGPUBackend(GPUBackend_OPENCL, proto::GPUBackend::OPENCL);
  CheckGPUBackend(GPUBackend_OPENGL, proto::GPUBackend::OPENGL);
}

TEST_F(ConversionTest, GPUInferencePriority) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
  GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();

  input_settings->inference_priority1 =
      GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE;
  input_settings->inference_priority2 =
      GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY;
  // Third priority is AUTO by default.

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();

  EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE,
            output_settings.inference_priority1());
  EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY,
            output_settings.inference_priority2());
  EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_AUTO,
            output_settings.inference_priority3());
}

TEST_F(ConversionTest, GPUInferencePreference) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
  GPUSettingsT* input_settings = settings_.tflite_settings->gpu_settings.get();

  input_settings->inference_preference =
      GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::GPUSettings output_settings = compute.tflite_settings().gpu_settings();
  EXPECT_EQ(
      proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
      output_settings.inference_preference());

  input_settings->inference_preference =
      GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().gpu_settings();
  EXPECT_EQ(proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED,
            output_settings.inference_preference());
}

TEST_F(ConversionTest, HexagonSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->hexagon_settings =
      std::make_unique<HexagonSettingsT>();
  HexagonSettingsT* input_settings =
      settings_.tflite_settings->hexagon_settings.get();
  input_settings->debug_level = 1;
  input_settings->powersave_level = 2;
  input_settings->print_graph_profile = true;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  const proto::HexagonSettings& output_settings =
      compute.tflite_settings().hexagon_settings();
  EXPECT_EQ(1, output_settings.debug_level());
  EXPECT_EQ(2, output_settings.powersave_level());
  EXPECT_TRUE(output_settings.print_graph_profile());
  EXPECT_FALSE(output_settings.print_graph_debug());
}

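// Exercises the EdgeTPU settings conversion end to end: power states, an
// inactive power config, inference priority, model token, float truncation
// type, and the device spec (including device paths added after the first
// conversion).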
TEST_F(ConversionTest, EdgeTpuSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->edgetpu_settings =
      std::make_unique<EdgeTpuSettingsT>();
  EdgeTpuSettingsT* input_settings =
      settings_.tflite_settings->edgetpu_settings.get();

  constexpr EdgeTpuPowerState kInferencePowerState = EdgeTpuPowerState_ACTIVE;
  constexpr EdgeTpuPowerState kInactivePowerState =
      EdgeTpuPowerState_ACTIVE_MIN_POWER;
  constexpr int64_t kInactiveTimeoutUs = 300000;
  constexpr int kInferencePriority = 2;
  const std::string kModelToken = "model_token";
  constexpr EdgeTpuSettings_::FloatTruncationType kFloatTruncationType =
      EdgeTpuSettings_::FloatTruncationType_HALF;

  input_settings->inference_power_state = kInferencePowerState;
  input_settings->inference_priority = kInferencePriority;
  input_settings->model_token = kModelToken;
  input_settings->float_truncation_type = kFloatTruncationType;

  auto inactive_power_config = std::make_unique<EdgeTpuInactivePowerConfigT>();
  inactive_power_config->inactive_power_state = kInactivePowerState;
  inactive_power_config->inactive_timeout_us = kInactiveTimeoutUs;
  input_settings->inactive_power_configs.emplace_back(
      std::move(inactive_power_config));

  constexpr EdgeTpuDeviceSpec_::PlatformType kPlatformType =
      EdgeTpuDeviceSpec_::PlatformType_MMIO;
  constexpr int kNumChips = 1;
  const std::string kDevicePath = "/dev/abrolhos";
  constexpr int kChipFamily = 1;

  input_settings->edgetpu_device_spec = std::make_unique<EdgeTpuDeviceSpecT>();
  EdgeTpuDeviceSpecT* input_spec = input_settings->edgetpu_device_spec.get();
  input_spec->platform_type = kPlatformType;
  input_spec->num_chips = kNumChips;
  input_spec->chip_family = kChipFamily;

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::EdgeTpuSettings output_settings =
      compute.tflite_settings().edgetpu_settings();

  EXPECT_EQ(
      static_cast<EdgeTpuPowerState>(output_settings.inference_power_state()),
      kInferencePowerState);
  EXPECT_EQ(output_settings.inactive_power_configs().size(), 1);
  EXPECT_EQ(
      static_cast<EdgeTpuPowerState>(output_settings.inactive_power_configs()
                                         .at(0)
                                         .inactive_power_state()),
      kInactivePowerState);
  EXPECT_EQ(
      output_settings.inactive_power_configs().at(0).inactive_timeout_us(),
      kInactiveTimeoutUs);

  EXPECT_EQ(output_settings.inference_priority(), kInferencePriority);
  EXPECT_EQ(output_settings.model_token(), kModelToken);
  EXPECT_EQ(static_cast<EdgeTpuSettings_::FloatTruncationType>(
                output_settings.float_truncation_type()),
            kFloatTruncationType);

  EXPECT_EQ(static_cast<EdgeTpuDeviceSpec_::PlatformType>(
                output_settings.edgetpu_device_spec().platform_type()),
            kPlatformType);
  EXPECT_EQ(output_settings.edgetpu_device_spec().num_chips(), kNumChips);
  EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths_size(), 0);
  EXPECT_EQ(output_settings.edgetpu_device_spec().chip_family(), kChipFamily);

  input_spec->device_paths.push_back(kDevicePath);

  compute = ConvertFromFlatbuffer(settings_);
  output_settings = compute.tflite_settings().edgetpu_settings();
  EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths().size(), 1);
  EXPECT_EQ(output_settings.edgetpu_device_spec().device_paths()[0],
            kDevicePath);
}

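// XNNPack settings carry the thread count and the delegate flags; the proto
// stores the flags as a plain integer, so QS8_QU8 is expected to map to 3 here.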
TEST_F(ConversionTest, XNNPackSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->xnnpack_settings =
      std::make_unique<XNNPackSettingsT>();
  XNNPackSettingsT* input_settings =
      settings_.tflite_settings->xnnpack_settings.get();

  input_settings->num_threads = 2;
  input_settings->flags =
      tflite::XNNPackFlags::XNNPackFlags_TFLITE_XNNPACK_DELEGATE_FLAG_QS8_QU8;
  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(compute.tflite_settings().xnnpack_settings().num_threads(), 2);
  EXPECT_EQ(compute.tflite_settings().xnnpack_settings().flags(), 3);
}

TEST_F(ConversionTest, CoreMLSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->coreml_settings =
      std::make_unique<CoreMLSettingsT>();
  CoreMLSettingsT* input_settings =
      settings_.tflite_settings->coreml_settings.get();

  input_settings->enabled_devices =
      CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
  input_settings->coreml_version = 3;
  input_settings->max_delegated_partitions = 10;
  input_settings->min_nodes_per_partition = 4;
  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(compute.tflite_settings().coreml_settings().enabled_devices(),
            proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE);
  EXPECT_EQ(compute.tflite_settings().coreml_settings().coreml_version(), 3);
  EXPECT_EQ(
      compute.tflite_settings().coreml_settings().max_delegated_partitions(),
      10);
  EXPECT_EQ(
      compute.tflite_settings().coreml_settings().min_nodes_per_partition(), 4);
}

TEST_F(ConversionTest, CoralSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->coral_settings =
      std::make_unique<CoralSettingsT>();
  CoralSettingsT* input_settings =
      settings_.tflite_settings->coral_settings.get();

  input_settings->device = "test";
  input_settings->performance = CoralSettings_::Performance_HIGH;
  input_settings->usb_always_dfu = true;
  input_settings->usb_max_bulk_in_queue_length = 768;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  const proto::CoralSettings& output_settings =
      compute.tflite_settings().coral_settings();
  EXPECT_EQ("test", output_settings.device());
  EXPECT_TRUE(output_settings.usb_always_dfu());
  EXPECT_EQ(proto::CoralSettings::HIGH, output_settings.performance());
  EXPECT_EQ(768, output_settings.usb_max_bulk_in_queue_length());
}

TEST_F(ConversionTest, CPUSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();

  settings_.tflite_settings->cpu_settings->num_threads = 2;
  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(compute.tflite_settings().cpu_settings().num_threads(), 2);
}

TEST_F(ConversionTest, MaxDelegatedPartitions) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->max_delegated_partitions = 2;
  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(compute.tflite_settings().max_delegated_partitions(), 2);
}

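// Builds a ComputeSettingsT whose mini-benchmark settings list three candidate
// TFLiteSettingsT entries (XNNPack, Hexagon, Core ML), verifies the converted
// proto, and then re-converts with skip_mini_benchmark_settings set to check
// that the mini-benchmark portion is dropped.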
TEST_F(ConversionTest, MiniBenchmarkSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
  settings_.tflite_settings->cpu_settings->num_threads = 2;
  settings_.model_identifier_for_statistics = "id";
  settings_.model_namespace_for_statistics = "ns";
  settings_.settings_to_test_locally =
      std::make_unique<MinibenchmarkSettingsT>();
  MinibenchmarkSettingsT* mini_settings =
      settings_.settings_to_test_locally.get();
  mini_settings->model_file = std::make_unique<ModelFileT>();
  mini_settings->model_file->filename = "test_model";
  mini_settings->storage_paths = std::make_unique<BenchmarkStoragePathsT>();
  mini_settings->storage_paths->storage_file_path = "/data/local/tmp";
  auto xnnpack = std::make_unique<TFLiteSettingsT>();
  xnnpack->xnnpack_settings = std::make_unique<XNNPackSettingsT>();
  xnnpack->xnnpack_settings->num_threads = 2;
  auto hexagon = std::make_unique<TFLiteSettingsT>();
  hexagon->hexagon_settings = std::make_unique<HexagonSettingsT>();
  hexagon->hexagon_settings->powersave_level = 3;
  auto coreml = std::make_unique<TFLiteSettingsT>();
  coreml->coreml_settings = std::make_unique<CoreMLSettingsT>();
  coreml->coreml_settings->enabled_devices =
      CoreMLSettings_::EnabledDevices_DEVICES_WITH_NEURAL_ENGINE;
  coreml->coreml_settings->coreml_version = 3;
  coreml->coreml_settings->max_delegated_partitions = 10;
  coreml->coreml_settings->min_nodes_per_partition = 4;
  mini_settings->settings_to_test.emplace_back(std::move(xnnpack));
  mini_settings->settings_to_test.emplace_back(std::move(hexagon));
  mini_settings->settings_to_test.emplace_back(std::move(coreml));

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
  EXPECT_EQ("id", compute.model_identifier_for_statistics());
  EXPECT_EQ("ns", compute.model_namespace_for_statistics());
  EXPECT_TRUE(compute.has_settings_to_test_locally());
  const proto::MinibenchmarkSettings& mini_output =
      compute.settings_to_test_locally();
  EXPECT_EQ("test_model", mini_output.model_file().filename());
  EXPECT_EQ("/data/local/tmp", mini_output.storage_paths().storage_file_path());

  EXPECT_EQ(3, mini_output.settings_to_test_size());
  EXPECT_EQ(
      2, mini_output.settings_to_test().at(0).xnnpack_settings().num_threads());
  EXPECT_EQ(3, mini_output.settings_to_test()
                   .at(1)
                   .hexagon_settings()
                   .powersave_level());

  EXPECT_EQ(
      proto::CoreMLSettings::DEVICES_WITH_NEURAL_ENGINE,
      mini_output.settings_to_test().at(2).coreml_settings().enabled_devices());
  EXPECT_EQ(
      3,
      mini_output.settings_to_test().at(2).coreml_settings().coreml_version());
  EXPECT_EQ(10, mini_output.settings_to_test()
                    .at(2)
                    .coreml_settings()
                    .max_delegated_partitions());
  EXPECT_EQ(4, mini_output.settings_to_test()
                   .at(2)
                   .coreml_settings()
                   .min_nodes_per_partition());

  compute =
      ConvertFromFlatbuffer(settings_, /*skip_mini_benchmark_settings=*/true);
  EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
  EXPECT_EQ("id", compute.model_identifier_for_statistics());
  EXPECT_EQ("ns", compute.model_namespace_for_statistics());
  EXPECT_FALSE(compute.has_settings_to_test_locally());
}

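// A MiniBenchmarkEventT carrying a BestAccelerationDecisionT: first only the
// scalar fields, then with a full min-latency BenchmarkEventT (settings,
// result timings, memory usage, and timestamps) attached.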
TEST_F(ConversionTest, BestAccelerationDecisionEvent) {
  event_.is_log_flushing_event = true;
  event_.best_acceleration_decision =
      std::make_unique<BestAccelerationDecisionT>();
  event_.best_acceleration_decision->number_of_source_events = 4;
  event_.best_acceleration_decision->min_inference_time_us = 3000;

  proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_TRUE(proto_event.is_log_flushing_event());
  const auto& best_decision = proto_event.best_acceleration_decision();
  EXPECT_EQ(4, best_decision.number_of_source_events());
  EXPECT_EQ(3000, best_decision.min_inference_time_us());
  EXPECT_FALSE(best_decision.has_min_latency_event());

  event_.best_acceleration_decision->min_latency_event =
      std::make_unique<BenchmarkEventT>();
  auto* min_event = event_.best_acceleration_decision->min_latency_event.get();
  min_event->event_type = BenchmarkEventType_END;
  min_event->tflite_settings = std::make_unique<TFLiteSettingsT>();
  min_event->tflite_settings->delegate = Delegate_XNNPACK;
  min_event->tflite_settings->xnnpack_settings =
      std::make_unique<XNNPackSettingsT>();
  min_event->tflite_settings->xnnpack_settings->num_threads = 2;
  min_event->result = std::make_unique<BenchmarkResultT>();
  min_event->result->initialization_time_us.push_back(100);
  min_event->result->initialization_time_us.push_back(110);
  min_event->result->inference_time_us.push_back(3000);
  min_event->result->inference_time_us.push_back(3500);
  min_event->result->max_memory_kb = 1234;
  min_event->result->ok = true;
  min_event->boottime_us = 1111;
  min_event->wallclock_us = 2222;

  proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_TRUE(proto_event.best_acceleration_decision().has_min_latency_event());
  const auto& proto_min_event =
      proto_event.best_acceleration_decision().min_latency_event();
  EXPECT_EQ(proto::BenchmarkEventType::END, proto_min_event.event_type());
  EXPECT_EQ(proto::Delegate::XNNPACK,
            proto_min_event.tflite_settings().delegate());
  EXPECT_EQ(2,
            proto_min_event.tflite_settings().xnnpack_settings().num_threads());
  EXPECT_TRUE(proto_min_event.has_result());
  EXPECT_EQ(2, proto_min_event.result().initialization_time_us_size());
  EXPECT_EQ(100, proto_min_event.result().initialization_time_us()[0]);
  EXPECT_EQ(110, proto_min_event.result().initialization_time_us()[1]);
  EXPECT_EQ(2, proto_min_event.result().inference_time_us_size());
  EXPECT_EQ(3000, proto_min_event.result().inference_time_us()[0]);
  EXPECT_EQ(3500, proto_min_event.result().inference_time_us()[1]);
  EXPECT_EQ(1234, proto_min_event.result().max_memory_kb());
  EXPECT_TRUE(proto_min_event.result().ok());
  EXPECT_EQ(1111, proto_min_event.boottime_us());
  EXPECT_EQ(2222, proto_min_event.wallclock_us());
}

TEST_F(ConversionTest, BenchmarkInitializationEvent) {
  event_.initialization_failure =
      std::make_unique<BenchmarkInitializationFailureT>();
  event_.initialization_failure->initialization_status = 101;

  proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_FALSE(proto_event.is_log_flushing_event());
  EXPECT_EQ(101, proto_event.initialization_failure().initialization_status());
}

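// A benchmark error with two nested ErrorCodeT entries; verifies that the
// stage, exit code, signal, mini-benchmark error code, and each error code's
// source/tflite_error/underlying_api_error survive the conversion.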
TEST_F(ConversionTest, BenchmarkError) {
  event_.benchmark_event = std::make_unique<BenchmarkEventT>();
  event_.benchmark_event->error = std::make_unique<BenchmarkErrorT>();
  auto* error = event_.benchmark_event->error.get();
  error->stage = BenchmarkStage_INITIALIZATION;
  error->exit_code = 123;
  error->signal = 321;
  error->mini_benchmark_error_code = 456;
  auto code1 = std::make_unique<ErrorCodeT>();
  code1->source = Delegate_EDGETPU;
  code1->tflite_error = 3;
  code1->underlying_api_error = 301;
  error->error_code.emplace_back(std::move(code1));
  auto code2 = std::make_unique<ErrorCodeT>();
  code2->source = Delegate_NNAPI;
  code2->tflite_error = 4;
  code2->underlying_api_error = 404;
  error->error_code.emplace_back(std::move(code2));

  const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  const auto& proto_error = proto_event.benchmark_event().error();
  EXPECT_EQ(proto::BenchmarkStage::INITIALIZATION, proto_error.stage());
  EXPECT_EQ(123, proto_error.exit_code());
  EXPECT_EQ(321, proto_error.signal());
  EXPECT_EQ(456, proto_error.mini_benchmark_error_code());
  EXPECT_EQ(2, proto_error.error_code_size());

  EXPECT_EQ(proto::Delegate::EDGETPU, proto_error.error_code()[0].source());
  EXPECT_EQ(3, proto_error.error_code()[0].tflite_error());
  EXPECT_EQ(301, proto_error.error_code()[0].underlying_api_error());

  EXPECT_EQ(proto::Delegate::NNAPI, proto_error.error_code()[1].source());
  EXPECT_EQ(4, proto_error.error_code()[1].tflite_error());
  EXPECT_EQ(404, proto_error.error_code()[1].underlying_api_error());
}

TEST_F(ConversionTest, BenchmarkMetric) {
  event_.benchmark_event = std::make_unique<BenchmarkEventT>();
  event_.benchmark_event->result = std::make_unique<BenchmarkResultT>();
  auto metric = std::make_unique<BenchmarkMetricT>();
  metric->name = "test";
  metric->values.push_back(1.234);
  metric->values.push_back(5.678);
  event_.benchmark_event->result->metrics.emplace_back(std::move(metric));

  const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_EQ(1, proto_event.benchmark_event().result().metrics_size());
  const auto& proto_metric =
      proto_event.benchmark_event().result().metrics()[0];
  EXPECT_EQ("test", proto_metric.name());
  EXPECT_EQ(2, proto_metric.values_size());
  EXPECT_FLOAT_EQ(1.234, proto_metric.values()[0]);
  EXPECT_FLOAT_EQ(5.678, proto_metric.values()[1]);
}
}  // namespace
}  // namespace acceleration
}  // namespace tflite