/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/configuration/flatbuffer_to_proto.h"

#include <memory>
#include <string>
#include <utility>

#include <gmock/gmock.h>
#include <gtest/gtest.h>

#include "tensorflow/lite/experimental/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
25
26 namespace tflite {
27 namespace acceleration {
28 namespace {
29
30 class ConversionTest : public ::testing::Test {
31 protected:
CheckDelegateEnum(Delegate input,proto::Delegate output)32 void CheckDelegateEnum(Delegate input, proto::Delegate output) {
33 settings_.tflite_settings.reset(new TFLiteSettingsT());
34 settings_.tflite_settings->delegate = input;
35 const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
36 EXPECT_EQ(output, compute.tflite_settings().delegate());
37 }
CheckExecutionPreference(ExecutionPreference input,proto::ExecutionPreference output)38 void CheckExecutionPreference(ExecutionPreference input,
39 proto::ExecutionPreference output) {
40 settings_.preference = input;
41 const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
42 EXPECT_EQ(output, compute.preference());
43 }
CheckNNAPIExecutionPreference(NNAPIExecutionPreference input,proto::NNAPIExecutionPreference output)44 void CheckNNAPIExecutionPreference(NNAPIExecutionPreference input,
45 proto::NNAPIExecutionPreference output) {
46 settings_.tflite_settings.reset(new TFLiteSettingsT());
47 settings_.tflite_settings->nnapi_settings.reset(new NNAPISettingsT());
48 settings_.tflite_settings->nnapi_settings->execution_preference = input;
49 const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
50 EXPECT_EQ(
51 output,
52 compute.tflite_settings().nnapi_settings().execution_preference());
53 }
CheckNNAPIExecutionPriority(NNAPIExecutionPriority input,proto::NNAPIExecutionPriority output)54 void CheckNNAPIExecutionPriority(NNAPIExecutionPriority input,
55 proto::NNAPIExecutionPriority output) {
56 settings_.tflite_settings.reset(new TFLiteSettingsT());
57 settings_.tflite_settings->nnapi_settings.reset(new NNAPISettingsT());
58 settings_.tflite_settings->nnapi_settings->execution_priority = input;
59 const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
60 EXPECT_EQ(output,
61 compute.tflite_settings().nnapi_settings().execution_priority());
62 }
CheckGPUBackend(GPUBackend input,proto::GPUBackend output)63 void CheckGPUBackend(GPUBackend input, proto::GPUBackend output) {
64 settings_.tflite_settings.reset(new TFLiteSettingsT());
65 settings_.tflite_settings->gpu_settings.reset(new GPUSettingsT());
66 settings_.tflite_settings->gpu_settings->force_backend = input;
67 const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
68 EXPECT_EQ(output, compute.tflite_settings().gpu_settings().force_backend());
69 }
70
71 ComputeSettingsT settings_;
72 MiniBenchmarkEventT event_;
73 };
74
// Exhaustively checks the Delegate enum mapping, flatbuffer -> proto.
TEST_F(ConversionTest, Delegate) {
  const std::pair<Delegate, proto::Delegate> kCases[] = {
      {Delegate_NONE, proto::Delegate::NONE},
      {Delegate_NNAPI, proto::Delegate::NNAPI},
      {Delegate_GPU, proto::Delegate::GPU},
      {Delegate_HEXAGON, proto::Delegate::HEXAGON},
      {Delegate_EDGETPU, proto::Delegate::EDGETPU},
      {Delegate_EDGETPU_CORAL, proto::Delegate::EDGETPU_CORAL},
      {Delegate_XNNPACK, proto::Delegate::XNNPACK},
  };
  for (const auto& [flatbuffer_value, proto_value] : kCases) {
    CheckDelegateEnum(flatbuffer_value, proto_value);
  }
}
84
// Exhaustively checks the ExecutionPreference enum mapping.
TEST_F(ConversionTest, ExecutionPreference) {
  const std::pair<ExecutionPreference, proto::ExecutionPreference> kCases[] = {
      {ExecutionPreference_ANY, proto::ExecutionPreference::ANY},
      {ExecutionPreference_LOW_LATENCY, proto::ExecutionPreference::LOW_LATENCY},
      {ExecutionPreference_LOW_POWER, proto::ExecutionPreference::LOW_POWER},
      {ExecutionPreference_FORCE_CPU, proto::ExecutionPreference::FORCE_CPU},
  };
  for (const auto& [flatbuffer_value, proto_value] : kCases) {
    CheckExecutionPreference(flatbuffer_value, proto_value);
  }
}
95
// Checks that the model identifier/namespace used for statistics are copied
// through unchanged.
TEST_F(ConversionTest, ModelIdentifier) {
  settings_.model_namespace_for_statistics = "ns";
  settings_.model_identifier_for_statistics = "id";

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ("ns", compute.model_namespace_for_statistics());
  EXPECT_EQ("id", compute.model_identifier_for_statistics());
}
103
// Covers the scalar/string NNAPI fields plus the nested fallback settings.
TEST_F(ConversionTest, NNAPISettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* nnapi = settings_.tflite_settings->nnapi_settings.get();
  nnapi->accelerator_name = "a";
  nnapi->cache_directory = "d";
  nnapi->model_token = "t";
  nnapi->allow_fp16_precision_for_fp32 = true;

  // Converts the current settings_ and returns a copy of the NNAPI section.
  auto convert = [this]() -> proto::NNAPISettings {
    return ConvertFromFlatbuffer(settings_).tflite_settings().nnapi_settings();
  };

  proto::NNAPISettings output = convert();
  EXPECT_EQ("a", output.accelerator_name());
  EXPECT_EQ("d", output.cache_directory());
  EXPECT_EQ("t", output.model_token());
  EXPECT_TRUE(output.allow_fp16_precision_for_fp32());
  EXPECT_FALSE(output.allow_nnapi_cpu_on_android_10_plus());
  EXPECT_FALSE(output.fallback_settings()
                   .allow_automatic_fallback_on_compilation_error());
  EXPECT_FALSE(output.fallback_settings()
                   .allow_automatic_fallback_on_execution_error());

  // Enable fallback on compilation error only.
  nnapi->fallback_settings = std::make_unique<FallbackSettingsT>();
  nnapi->fallback_settings->allow_automatic_fallback_on_compilation_error =
      true;
  output = convert();
  EXPECT_TRUE(output.fallback_settings()
                  .allow_automatic_fallback_on_compilation_error());
  EXPECT_FALSE(output.fallback_settings()
                   .allow_automatic_fallback_on_execution_error());

  // Flip to fallback on execution error only.
  nnapi->fallback_settings->allow_automatic_fallback_on_compilation_error =
      false;
  nnapi->fallback_settings->allow_automatic_fallback_on_execution_error = true;
  output = convert();
  EXPECT_FALSE(output.fallback_settings()
                   .allow_automatic_fallback_on_compilation_error());
  EXPECT_TRUE(output.fallback_settings()
                  .allow_automatic_fallback_on_execution_error());

  // Clearing the fp16 flag must round-trip as false.
  nnapi->allow_fp16_precision_for_fp32 = false;
  EXPECT_FALSE(convert().allow_fp16_precision_for_fp32());
}
153
// Checks default (false) and explicit (true) values of
// allow_dynamic_dimensions.
TEST_F(ConversionTest, NNAPIAllowDynamicDimensions) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* nnapi = settings_.tflite_settings->nnapi_settings.get();

  auto convert = [this]() -> proto::NNAPISettings {
    return ConvertFromFlatbuffer(settings_).tflite_settings().nnapi_settings();
  };

  // Unset on the input, so the proto default applies.
  EXPECT_FALSE(convert().allow_dynamic_dimensions());

  nnapi->allow_dynamic_dimensions = true;
  EXPECT_TRUE(convert().allow_dynamic_dimensions());
}
170
// Checks default (false) and explicit (true) values of use_burst_computation.
TEST_F(ConversionTest, NNAPIBurstComputation) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->nnapi_settings =
      std::make_unique<NNAPISettingsT>();
  NNAPISettingsT* nnapi = settings_.tflite_settings->nnapi_settings.get();

  auto convert = [this]() -> proto::NNAPISettings {
    return ConvertFromFlatbuffer(settings_).tflite_settings().nnapi_settings();
  };

  // Unset on the input, so the proto default applies.
  EXPECT_FALSE(convert().use_burst_computation());

  nnapi->use_burst_computation = true;
  EXPECT_TRUE(convert().use_burst_computation());
}
187
// Exhaustively checks the NNAPIExecutionPreference enum mapping.
TEST_F(ConversionTest, NNAPIExecutionPreference) {
  const std::pair<NNAPIExecutionPreference, proto::NNAPIExecutionPreference>
      kCases[] = {
          {NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
           proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER},
          {NNAPIExecutionPreference_NNAPI_LOW_POWER,
           proto::NNAPIExecutionPreference::NNAPI_LOW_POWER},
          {NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED,
           proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED},
          {NNAPIExecutionPreference_UNDEFINED,
           proto::NNAPIExecutionPreference::UNDEFINED},
      };
  for (const auto& [flatbuffer_value, proto_value] : kCases) {
    CheckNNAPIExecutionPreference(flatbuffer_value, proto_value);
  }
}
201
// Exhaustively checks the NNAPIExecutionPriority enum mapping.
TEST_F(ConversionTest, NNAPIExecutionPriority) {
  const std::pair<NNAPIExecutionPriority, proto::NNAPIExecutionPriority>
      kCases[] = {
          {NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
           proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW},
          {NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
           proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM},
          {NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH,
           proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH},
          {NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
           proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED},
      };
  for (const auto& [flatbuffer_value, proto_value] : kCases) {
    CheckNNAPIExecutionPriority(flatbuffer_value, proto_value);
  }
}
216
// Covers is_precision_loss_allowed (both values) and the
// enable_quantized_inference default.
TEST_F(ConversionTest, GPUSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
  GPUSettingsT* gpu = settings_.tflite_settings->gpu_settings.get();

  auto convert = [this]() -> proto::GPUSettings {
    return ConvertFromFlatbuffer(settings_).tflite_settings().gpu_settings();
  };

  gpu->is_precision_loss_allowed = true;
  EXPECT_TRUE(convert().is_precision_loss_allowed());

  gpu->is_precision_loss_allowed = false;
  proto::GPUSettings output = convert();
  EXPECT_FALSE(output.is_precision_loss_allowed());

  // Quantized inference defaults to enabled and can be switched off.
  EXPECT_TRUE(output.enable_quantized_inference());
  gpu->enable_quantized_inference = false;
  EXPECT_FALSE(convert().enable_quantized_inference());
}
238
// Exhaustively checks the GPUBackend enum mapping.
// NOTE(review): the test name looks like a typo for "GPUBackend"; kept as-is
// so existing test filters keep matching.
TEST_F(ConversionTest, GPUBacked) {
  const std::pair<GPUBackend, proto::GPUBackend> kCases[] = {
      {GPUBackend_UNSET, proto::GPUBackend::UNSET},
      {GPUBackend_OPENCL, proto::GPUBackend::OPENCL},
      {GPUBackend_OPENGL, proto::GPUBackend::OPENGL},
  };
  for (const auto& [flatbuffer_value, proto_value] : kCases) {
    CheckGPUBackend(flatbuffer_value, proto_value);
  }
}
244
// Checks that the three GPU inference priority slots convert, including the
// unset third slot falling back to AUTO.
TEST_F(ConversionTest, GPUInferencePriority) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
  GPUSettingsT* gpu = settings_.tflite_settings->gpu_settings.get();

  gpu->inference_priority1 = GPUInferencePriority_GPU_PRIORITY_MIN_MEMORY_USAGE;
  gpu->inference_priority2 = GPUInferencePriority_GPU_PRIORITY_MIN_LATENCY;
  // inference_priority3 is deliberately left at its default (AUTO).

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  const proto::GPUSettings& output = compute.tflite_settings().gpu_settings();

  EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_MEMORY_USAGE,
            output.inference_priority1());
  EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_MIN_LATENCY,
            output.inference_priority2());
  EXPECT_EQ(proto::GPUInferencePriority::GPU_PRIORITY_AUTO,
            output.inference_priority3());
}
266
// Checks both GPUInferenceUsage values through the conversion.
TEST_F(ConversionTest, GPUInferencePreference) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->gpu_settings = std::make_unique<GPUSettingsT>();
  GPUSettingsT* gpu = settings_.tflite_settings->gpu_settings.get();

  auto convert = [this]() -> proto::GPUSettings {
    return ConvertFromFlatbuffer(settings_).tflite_settings().gpu_settings();
  };

  gpu->inference_preference =
      GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER;
  EXPECT_EQ(
      proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_FAST_SINGLE_ANSWER,
      convert().inference_preference());

  gpu->inference_preference =
      GPUInferenceUsage_GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED;
  EXPECT_EQ(proto::GPUInferenceUsage::GPU_INFERENCE_PREFERENCE_SUSTAINED_SPEED,
            convert().inference_preference());
}
287
// Covers all Hexagon fields set on the input plus the untouched
// print_graph_debug default.
TEST_F(ConversionTest, HexagonSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->hexagon_settings =
      std::make_unique<HexagonSettingsT>();
  HexagonSettingsT* hexagon = settings_.tflite_settings->hexagon_settings.get();
  hexagon->debug_level = 1;
  hexagon->powersave_level = 2;
  hexagon->print_graph_profile = true;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  const proto::HexagonSettings& output =
      compute.tflite_settings().hexagon_settings();
  EXPECT_EQ(1, output.debug_level());
  EXPECT_EQ(2, output.powersave_level());
  EXPECT_TRUE(output.print_graph_profile());
  // Never set on the input, so it must remain false.
  EXPECT_FALSE(output.print_graph_debug());
}
305
// Covers the EdgeTPU settings: power states, priority, model token, float
// truncation, inactive power configs, and the nested device spec (including a
// device path added after the first conversion).
TEST_F(ConversionTest, EdgeTpuSettings) {
  constexpr EdgeTpuPowerState kInferencePowerState = EdgeTpuPowerState_ACTIVE;
  constexpr EdgeTpuPowerState kInactivePowerState =
      EdgeTpuPowerState_ACTIVE_MIN_POWER;
  constexpr int64_t kInactiveTimeoutUs = 300000;
  constexpr int kInferencePriority = 2;
  const std::string kModelToken = "model_token";
  constexpr EdgeTpuSettings_::FloatTruncationType kFloatTruncationType =
      EdgeTpuSettings_::FloatTruncationType_HALF;
  constexpr EdgeTpuDeviceSpec_::PlatformType kPlatformType =
      EdgeTpuDeviceSpec_::PlatformType_MMIO;
  constexpr int kNumChips = 1;
  constexpr int kChipFamily = 1;
  const std::string kDevicePath = "/dev/abrolhos";

  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->edgetpu_settings =
      std::make_unique<EdgeTpuSettingsT>();
  EdgeTpuSettingsT* edgetpu = settings_.tflite_settings->edgetpu_settings.get();
  edgetpu->inference_power_state = kInferencePowerState;
  edgetpu->inference_priority = kInferencePriority;
  edgetpu->model_token = kModelToken;
  edgetpu->float_truncation_type = kFloatTruncationType;

  auto inactive_config = std::make_unique<EdgeTpuInactivePowerConfigT>();
  inactive_config->inactive_power_state = kInactivePowerState;
  inactive_config->inactive_timeout_us = kInactiveTimeoutUs;
  edgetpu->inactive_power_configs.emplace_back(std::move(inactive_config));

  edgetpu->edgetpu_device_spec = std::make_unique<EdgeTpuDeviceSpecT>();
  EdgeTpuDeviceSpecT* spec = edgetpu->edgetpu_device_spec.get();
  spec->platform_type = kPlatformType;
  spec->num_chips = kNumChips;
  spec->chip_family = kChipFamily;

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  proto::EdgeTpuSettings output = compute.tflite_settings().edgetpu_settings();

  EXPECT_EQ(kInferencePowerState,
            static_cast<EdgeTpuPowerState>(output.inference_power_state()));
  EXPECT_EQ(1, output.inactive_power_configs().size());
  const auto& inactive = output.inactive_power_configs().at(0);
  EXPECT_EQ(kInactivePowerState,
            static_cast<EdgeTpuPowerState>(inactive.inactive_power_state()));
  EXPECT_EQ(kInactiveTimeoutUs, inactive.inactive_timeout_us());

  EXPECT_EQ(kInferencePriority, output.inference_priority());
  EXPECT_EQ(kModelToken, output.model_token());
  EXPECT_EQ(kFloatTruncationType,
            static_cast<EdgeTpuSettings_::FloatTruncationType>(
                output.float_truncation_type()));

  EXPECT_EQ(kPlatformType, static_cast<EdgeTpuDeviceSpec_::PlatformType>(
                               output.edgetpu_device_spec().platform_type()));
  EXPECT_EQ(kNumChips, output.edgetpu_device_spec().num_chips());
  EXPECT_EQ(0, output.edgetpu_device_spec().device_paths_size());
  EXPECT_EQ(kChipFamily, output.edgetpu_device_spec().chip_family());

  // A device path added on the input must show up after re-conversion.
  spec->device_paths.push_back(kDevicePath);
  compute = ConvertFromFlatbuffer(settings_);
  output = compute.tflite_settings().edgetpu_settings();
  EXPECT_EQ(1, output.edgetpu_device_spec().device_paths().size());
  EXPECT_EQ(kDevicePath, output.edgetpu_device_spec().device_paths()[0]);
}
383
// Checks that the XNNPack thread count survives the conversion.
TEST_F(ConversionTest, XNNPackSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->xnnpack_settings =
      std::make_unique<XNNPackSettingsT>();
  settings_.tflite_settings->xnnpack_settings->num_threads = 2;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(2, compute.tflite_settings().xnnpack_settings().num_threads());
}
394
// Checks all Coral fields: device string, performance enum, DFU flag, and
// USB queue length.
TEST_F(ConversionTest, CoralSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->coral_settings =
      std::make_unique<CoralSettingsT>();
  CoralSettingsT* coral = settings_.tflite_settings->coral_settings.get();
  coral->device = "test";
  coral->performance = CoralSettings_::Performance_HIGH;
  coral->usb_always_dfu = true;
  coral->usb_max_bulk_in_queue_length = 768;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  const proto::CoralSettings& output =
      compute.tflite_settings().coral_settings();
  EXPECT_EQ("test", output.device());
  EXPECT_EQ(proto::CoralSettings::HIGH, output.performance());
  EXPECT_TRUE(output.usb_always_dfu());
  EXPECT_EQ(768, output.usb_max_bulk_in_queue_length());
}
414
// Checks that the CPU thread count survives the conversion.
TEST_F(ConversionTest, CPUSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
  settings_.tflite_settings->cpu_settings->num_threads = 2;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
}
423
// Checks that max_delegated_partitions survives the conversion.
TEST_F(ConversionTest, MaxDelegatedPartitions) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->max_delegated_partitions = 2;

  const proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(2, compute.tflite_settings().max_delegated_partitions());
}
430
// Covers the local mini-benchmark configuration: model file, storage paths,
// two candidate TFLiteSettings, and the skip_mini_benchmark_settings flag.
TEST_F(ConversionTest, MiniBenchmarkSettings) {
  settings_.tflite_settings = std::make_unique<TFLiteSettingsT>();
  settings_.tflite_settings->cpu_settings = std::make_unique<CPUSettingsT>();
  settings_.tflite_settings->cpu_settings->num_threads = 2;
  settings_.model_identifier_for_statistics = "id";
  settings_.model_namespace_for_statistics = "ns";

  settings_.settings_to_test_locally =
      std::make_unique<MinibenchmarkSettingsT>();
  MinibenchmarkSettingsT* mb = settings_.settings_to_test_locally.get();
  mb->model_file = std::make_unique<ModelFileT>();
  mb->model_file->filename = "test_model";
  mb->storage_paths = std::make_unique<BenchmarkStoragePathsT>();
  mb->storage_paths->storage_file_path = "/data/local/tmp";

  // Candidate 0: XNNPack with two threads.
  auto xnnpack_candidate = std::make_unique<TFLiteSettingsT>();
  xnnpack_candidate->xnnpack_settings = std::make_unique<XNNPackSettingsT>();
  xnnpack_candidate->xnnpack_settings->num_threads = 2;
  mb->settings_to_test.emplace_back(std::move(xnnpack_candidate));
  // Candidate 1: Hexagon with powersave level 3.
  auto hexagon_candidate = std::make_unique<TFLiteSettingsT>();
  hexagon_candidate->hexagon_settings = std::make_unique<HexagonSettingsT>();
  hexagon_candidate->hexagon_settings->powersave_level = 3;
  mb->settings_to_test.emplace_back(std::move(hexagon_candidate));

  proto::ComputeSettings compute = ConvertFromFlatbuffer(settings_);
  EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
  EXPECT_EQ("id", compute.model_identifier_for_statistics());
  EXPECT_EQ("ns", compute.model_namespace_for_statistics());
  EXPECT_TRUE(compute.has_settings_to_test_locally());
  const proto::MinibenchmarkSettings& mb_output =
      compute.settings_to_test_locally();
  EXPECT_EQ("test_model", mb_output.model_file().filename());
  EXPECT_EQ("/data/local/tmp", mb_output.storage_paths().storage_file_path());

  EXPECT_EQ(2, mb_output.settings_to_test_size());
  EXPECT_EQ(
      2, mb_output.settings_to_test().at(0).xnnpack_settings().num_threads());
  EXPECT_EQ(3, mb_output.settings_to_test()
                   .at(1)
                   .hexagon_settings()
                   .powersave_level());

  // When skipping mini-benchmark settings, everything else still converts but
  // the local-test section is dropped.
  compute =
      ConvertFromFlatbuffer(settings_, /*skip_mini_benchmark_settings=*/true);
  EXPECT_EQ(2, compute.tflite_settings().cpu_settings().num_threads());
  EXPECT_EQ("id", compute.model_identifier_for_statistics());
  EXPECT_EQ("ns", compute.model_namespace_for_statistics());
  EXPECT_FALSE(compute.has_settings_to_test_locally());
}
478
// Covers BestAccelerationDecision both without and with a fully populated
// min-latency benchmark event.
TEST_F(ConversionTest, BestAccelerationDecisionEvent) {
  event_.is_log_flushing_event = true;
  event_.best_acceleration_decision =
      std::make_unique<BestAccelerationDecisionT>();
  event_.best_acceleration_decision->number_of_source_events = 4;
  event_.best_acceleration_decision->min_inference_time_us = 3000;

  proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_TRUE(proto_event.is_log_flushing_event());
  const auto& decision = proto_event.best_acceleration_decision();
  EXPECT_EQ(4, decision.number_of_source_events());
  EXPECT_EQ(3000, decision.min_inference_time_us());
  // No min-latency event attached yet.
  EXPECT_FALSE(decision.has_min_latency_event());

  // Attach a complete min-latency benchmark event and convert again.
  event_.best_acceleration_decision->min_latency_event =
      std::make_unique<BenchmarkEventT>();
  BenchmarkEventT* latency_event =
      event_.best_acceleration_decision->min_latency_event.get();
  latency_event->event_type = BenchmarkEventType_END;
  latency_event->tflite_settings = std::make_unique<TFLiteSettingsT>();
  latency_event->tflite_settings->delegate = Delegate_XNNPACK;
  latency_event->tflite_settings->xnnpack_settings =
      std::make_unique<XNNPackSettingsT>();
  latency_event->tflite_settings->xnnpack_settings->num_threads = 2;
  latency_event->result = std::make_unique<BenchmarkResultT>();
  latency_event->result->initialization_time_us = {100, 110};
  latency_event->result->inference_time_us = {3000, 3500};
  latency_event->result->max_memory_kb = 1234;
  latency_event->result->ok = true;
  latency_event->boottime_us = 1111;
  latency_event->wallclock_us = 2222;

  proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_TRUE(proto_event.best_acceleration_decision().has_min_latency_event());
  const auto& min_latency =
      proto_event.best_acceleration_decision().min_latency_event();
  EXPECT_EQ(proto::BenchmarkEventType::END, min_latency.event_type());
  EXPECT_EQ(proto::Delegate::XNNPACK, min_latency.tflite_settings().delegate());
  EXPECT_EQ(2, min_latency.tflite_settings().xnnpack_settings().num_threads());
  EXPECT_TRUE(min_latency.has_result());
  EXPECT_EQ(2, min_latency.result().initialization_time_us_size());
  EXPECT_EQ(100, min_latency.result().initialization_time_us()[0]);
  EXPECT_EQ(110, min_latency.result().initialization_time_us()[1]);
  EXPECT_EQ(2, min_latency.result().inference_time_us_size());
  EXPECT_EQ(3000, min_latency.result().inference_time_us()[0]);
  EXPECT_EQ(3500, min_latency.result().inference_time_us()[1]);
  EXPECT_EQ(1234, min_latency.result().max_memory_kb());
  EXPECT_TRUE(min_latency.result().ok());
  EXPECT_EQ(1111, min_latency.boottime_us());
  EXPECT_EQ(2222, min_latency.wallclock_us());
}
531
// Checks that an initialization-failure status code converts through.
TEST_F(ConversionTest, BenchmarkInitializationEvent) {
  event_.initialization_failure =
      std::make_unique<BenchmarkInitializationFailureT>();
  event_.initialization_failure->initialization_status = 101;

  const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  // Not a log-flushing event; only the failure payload is set.
  EXPECT_FALSE(proto_event.is_log_flushing_event());
  EXPECT_EQ(101, proto_event.initialization_failure().initialization_status());
}
540
// Covers BenchmarkError scalar fields plus a repeated list of two
// delegate-specific error codes.
TEST_F(ConversionTest, BenchmarkError) {
  event_.benchmark_event = std::make_unique<BenchmarkEventT>();
  event_.benchmark_event->error = std::make_unique<BenchmarkErrorT>();
  BenchmarkErrorT* error = event_.benchmark_event->error.get();
  error->stage = BenchmarkStage_INITIALIZATION;
  error->exit_code = 123;
  error->signal = 321;
  error->mini_benchmark_error_code = 456;

  // Appends one delegate-specific error code entry to the error.
  auto append_code = [error](Delegate source, int tflite_error,
                             int underlying_api_error) {
    auto code = std::make_unique<ErrorCodeT>();
    code->source = source;
    code->tflite_error = tflite_error;
    code->underlying_api_error = underlying_api_error;
    error->error_code.emplace_back(std::move(code));
  };
  append_code(Delegate_EDGETPU, 3, 301);
  append_code(Delegate_NNAPI, 4, 404);

  const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  const auto& proto_error = proto_event.benchmark_event().error();
  EXPECT_EQ(proto::BenchmarkStage::INITIALIZATION, proto_error.stage());
  EXPECT_EQ(123, proto_error.exit_code());
  EXPECT_EQ(321, proto_error.signal());
  EXPECT_EQ(456, proto_error.mini_benchmark_error_code());
  EXPECT_EQ(2, proto_error.error_code_size());

  EXPECT_EQ(proto::Delegate::EDGETPU, proto_error.error_code()[0].source());
  EXPECT_EQ(3, proto_error.error_code()[0].tflite_error());
  EXPECT_EQ(301, proto_error.error_code()[0].underlying_api_error());

  EXPECT_EQ(proto::Delegate::NNAPI, proto_error.error_code()[1].source());
  EXPECT_EQ(4, proto_error.error_code()[1].tflite_error());
  EXPECT_EQ(404, proto_error.error_code()[1].underlying_api_error());
}
576
// Checks that a named metric with two float values converts through.
TEST_F(ConversionTest, BenchmarkMetric) {
  event_.benchmark_event = std::make_unique<BenchmarkEventT>();
  event_.benchmark_event->result = std::make_unique<BenchmarkResultT>();
  auto metric = std::make_unique<BenchmarkMetricT>();
  metric->name = "test";
  metric->values = {1.234f, 5.678f};
  event_.benchmark_event->result->metrics.emplace_back(std::move(metric));

  const proto::MiniBenchmarkEvent proto_event = ConvertFromFlatbuffer(event_);
  EXPECT_EQ(1, proto_event.benchmark_event().result().metrics_size());
  const auto& proto_metric =
      proto_event.benchmark_event().result().metrics()[0];
  EXPECT_EQ("test", proto_metric.name());
  EXPECT_EQ(2, proto_metric.values_size());
  EXPECT_FLOAT_EQ(1.234, proto_metric.values()[0]);
  EXPECT_FLOAT_EQ(5.678, proto_metric.values()[1]);
}
595 } // namespace
596 } // namespace acceleration
597 } // namespace tflite
598