/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
// android/log.h contains __INTRODUCED_IN() macro and must be included before
// sharedmem.h
#include <android/log.h>
#include <android/sharedmem.h>
#include <gtest/gtest.h>
#include <sys/mman.h>

#include <algorithm>
#include <future>
#include <limits>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "AndroidVersionUtil.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksOEM.h"
#include "TmpDirectoryUtils.h"

#ifdef __ANDROID__
#include <android/hardware_buffer.h>
#else  // __ANDROID__
#include <android-base/file.h>
#endif  // __ANDROID__

#ifndef NNTEST_ONLY_PUBLIC_API
#include "NeuralNetworksExtensions.h"
#include "TypeManager.h"
#endif

// This file tests all the validations done by the Neural Networks API.
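//
// The test fixtures below build on one another: ValidationTestModel owns an unfinished model,
// ValidationTestCompilation adds a compilation of that model, ValidationTestExecution finishes
// the compilation and adds an execution, and ValidationTestBurst adds a burst object on top.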

namespace {

constexpr uint64_t kShortWaitInNanoseconds = 1'000'000'000;  // 1 second

class ValidationTest : public ::testing::Test {
   protected:
    virtual void SetUp() {}
};

class ValidationTestModel : public ValidationTest {
   protected:
    virtual void SetUp() {
        ValidationTest::SetUp();
        ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR);
    }
    virtual void TearDown() {
        ANeuralNetworksModel_free(mModel);
        ValidationTest::TearDown();
    }

    uint32_t addScalarOperand(int32_t type = ANEURALNETWORKS_INT32) {
        ANeuralNetworksOperandType operandType = {
                .type = type, .dimensionCount = 0, .dimensions = nullptr};
        EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &operandType), ANEURALNETWORKS_NO_ERROR);
        return mNumOperands++;
    }

    uint32_t addOperand(const ANeuralNetworksOperandType& operandType) {
        EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &operandType), ANEURALNETWORKS_NO_ERROR);
        return mNumOperands++;
    }

    uint32_t addTensorOperand(int32_t type = ANEURALNETWORKS_TENSOR_FLOAT32) {
        return addTensorOperand(type, {2});
    }

    uint32_t addTensorOperand(int32_t type, const std::vector<uint32_t>& dimensions) {
        ANeuralNetworksOperandType operandType = {
                .type = type,
                .dimensionCount = static_cast<uint32_t>(dimensions.size()),
                .dimensions = dimensions.data(),
        };
        return addOperand(operandType);
    }

    int addOperation(ANeuralNetworksOperationType type, const std::vector<uint32_t>& inputs,
                     const std::vector<uint32_t>& outputs) {
        ++mNumOperations;
        return ANeuralNetworksModel_addOperation(mModel, type, inputs.size(), inputs.data(),
                                                 outputs.size(), outputs.data());
    }
    int identifyInputsAndOutputs(const std::vector<uint32_t>& inputs,
                                 const std::vector<uint32_t>& outputs) {
        return ANeuralNetworksModel_identifyInputsAndOutputs(mModel, inputs.size(), inputs.data(),
                                                             outputs.size(), outputs.data());
    }
    int modelFinish() { return ANeuralNetworksModel_finish(mModel); }

    virtual void createModel() {
        addTensorOperand();
        addTensorOperand();
        addScalarOperand();
        addTensorOperand();
        const std::vector<uint32_t> inList = {0, 1, 2};
        const std::vector<uint32_t> outList = {3};
        ASSERT_EQ(addOperation(ANEURALNETWORKS_ADD, inList, outList), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(identifyInputsAndOutputs(inList, outList), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(modelFinish(), ANEURALNETWORKS_NO_ERROR);
    }

    uint32_t mNumOperands = 0;
    uint32_t mNumOperations = 0;
    ANeuralNetworksModel* mModel = nullptr;

    const uint32_t kDummyDimensionValue = 1;
    const ANeuralNetworksOperandType kInvalidTensorType1{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            // dimensionCount must be consistent with dimensions.
            .dimensionCount = 1,
            .dimensions = nullptr,
    };
    const ANeuralNetworksOperandType kInvalidTensorType2{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            // dimensionCount must be consistent with dimensions.
            .dimensionCount = 0,
            .dimensions = &kDummyDimensionValue,
    };
};

#ifndef NNTEST_ONLY_PUBLIC_API
constexpr const char* kTestExtensionName = "com.android.test_extension";
constexpr int32_t kTestExtensionTensorType = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL;

class ValidationTestModelExtensions : public ValidationTestModel {
   protected:
    virtual void SetUp() {
        ValidationTestModel::SetUp();
        EXPECT_TRUE(::android::nn::TypeManager::get()->forTest_registerExtension({
                .name = kTestExtensionName,
                .operandTypes =
                        {
                                {
                                        .type = kTestExtensionTensorType,
                                        .isTensor = true,
                                        .byteSize = 1,
                                },
                        },
        }));
    }

    virtual void TearDown() {
        ::android::nn::TypeManager::get()->forTest_reset();
        ValidationTestModel::TearDown();
    }

    int32_t getExtensionOperandType(uint16_t typeWithinExtension) {
        int32_t result;
        EXPECT_EQ(ANeuralNetworksModel_getExtensionOperandType(mModel, kTestExtensionName,
                                                               typeWithinExtension, &result),
                  ANEURALNETWORKS_NO_ERROR);
        return result;
    }
};
#endif

class ValidationTestIdentify : public ValidationTestModel {
    virtual void SetUp() {
        ValidationTestModel::SetUp();

        uint32_t dimensions[]{1};
        ANeuralNetworksOperandType tensorType{.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                              .dimensionCount = 1,
                                              .dimensions = dimensions};
        ANeuralNetworksOperandType scalarType{
                .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
        ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &scalarType), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(addOperation(ANEURALNETWORKS_ADD, {0, 1, 2}, {3}), ANEURALNETWORKS_NO_ERROR);
    }
    virtual void TearDown() { ValidationTestModel::TearDown(); }
};

class ValidationTestCompilation : public ValidationTestModel {
   protected:
    virtual void SetUp() {
        ValidationTestModel::SetUp();
        createModel();
        ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &mCompilation),
                  ANEURALNETWORKS_NO_ERROR);
    }

    virtual void TearDown() {
        ANeuralNetworksCompilation_free(mCompilation);
        ValidationTestModel::TearDown();
    }

    ANeuralNetworksCompilation* mCompilation = nullptr;
};

class ValidationTestExecution : public ValidationTestCompilation {
   protected:
    virtual void SetUp() {
        ValidationTestCompilation::SetUp();

        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);

        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
                  ANEURALNETWORKS_NO_ERROR);
    }
    virtual void TearDown() {
        ANeuralNetworksExecution_free(mExecution);
        ValidationTestCompilation::TearDown();
    }
    ANeuralNetworksExecution* mExecution = nullptr;
};

class ValidationTestBurst : public ValidationTestExecution {
   protected:
    virtual void SetUp() {
        ValidationTestExecution::SetUp();

        ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &mBurst), ANEURALNETWORKS_NO_ERROR);
    }
    virtual void TearDown() {
        ANeuralNetworksBurst_free(mBurst);
        ValidationTestExecution::TearDown();
    }
    ANeuralNetworksBurst* mBurst = nullptr;
};

class ValidationTestMemoryDesc : public ValidationTestCompilation {
   protected:
    virtual void SetUp() {
        ValidationTestCompilation::SetUp();
        ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&mDesc), ANEURALNETWORKS_NO_ERROR);
    }
    virtual void TearDown() {
        ANeuralNetworksMemoryDesc_free(mDesc);
        for (auto* memory : mMemories) ANeuralNetworksMemory_free(memory);
        for (int fd : mFds) close(fd);
        ValidationTestCompilation::TearDown();
    }

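    // Creates an ANeuralNetworksMemory of `size` bytes backed by shared memory on Android, or by
    // a truncated temporary file when building for the host. The fd and memory object are
    // recorded so TearDown() can release them.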
    ANeuralNetworksMemory* createAshmem(uint32_t size) {
#ifdef __ANDROID__
        int fd = ASharedMemory_create("nnMemory", size);
#else  // __ANDROID__
        TemporaryFile tmpFile;
        int fd = tmpFile.release();
        CHECK_EQ(ftruncate(fd, size), 0);
#endif  // __ANDROID__
        EXPECT_GT(fd, 0);
        mFds.push_back(fd);
        ANeuralNetworksMemory* ashmem = nullptr;
        EXPECT_EQ(ANeuralNetworksMemory_createFromFd(size, PROT_READ | PROT_WRITE, fd, 0, &ashmem),
                  ANEURALNETWORKS_NO_ERROR);
        mMemories.push_back(ashmem);
        return ashmem;
    }

    ANeuralNetworksMemoryDesc* mDesc = nullptr;
    std::vector<ANeuralNetworksMemory*> mMemories;
    std::vector<int> mFds;
};

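// Fixture for device-memory execution tests. It prepares a fully specified model, a model with
// unspecified (dynamic) dimensions, and two extra compilations: a valid one ("init") and one
// built with an invalid activation value ("deinit"), presumably used to exercise failure paths.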
class ValidationTestExecutionDeviceMemory : public ValidationTest {
   protected:
    virtual void SetUp() {
        ValidationTest::SetUp();
        ASSERT_EQ(ANeuralNetworksModel_create(&mModel), ANEURALNETWORKS_NO_ERROR);
        createModel(mModel, /*dimensionsUnspecified=*/false, /*isValid=*/true);
        ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &mCompilation),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
                  ANEURALNETWORKS_NO_ERROR);

        ASSERT_EQ(ANeuralNetworksModel_create(&mModelDynamic), ANEURALNETWORKS_NO_ERROR);
        createModel(mModelDynamic, /*dimensionsUnspecified=*/true, /*isValid=*/true);
        ASSERT_EQ(ANeuralNetworksCompilation_create(mModelDynamic, &mCompilationDynamic),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilationDynamic), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilationDynamic, &mExecutionDynamic),
                  ANEURALNETWORKS_NO_ERROR);

        ASSERT_EQ(ANeuralNetworksModel_create(&mInitModel), ANEURALNETWORKS_NO_ERROR);
        createModel(mInitModel, /*dimensionsUnspecified=*/false, /*isValid=*/true);
        ASSERT_EQ(ANeuralNetworksCompilation_create(mInitModel, &mInitCompilation),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksCompilation_finish(mInitCompilation), ANEURALNETWORKS_NO_ERROR);

        ASSERT_EQ(ANeuralNetworksModel_create(&mDeinitModel), ANEURALNETWORKS_NO_ERROR);
        createModel(mDeinitModel, /*dimensionsUnspecified=*/false, /*isValid=*/false);
        ASSERT_EQ(ANeuralNetworksCompilation_create(mDeinitModel, &mDeinitCompilation),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksCompilation_finish(mDeinitCompilation), ANEURALNETWORKS_NO_ERROR);
    }
    virtual void TearDown() {
        ANeuralNetworksExecution_free(mExecution);
        ANeuralNetworksCompilation_free(mCompilation);
        ANeuralNetworksModel_free(mModel);
        ANeuralNetworksExecution_free(mExecutionDynamic);
        ANeuralNetworksCompilation_free(mCompilationDynamic);
        ANeuralNetworksModel_free(mModelDynamic);

        ANeuralNetworksCompilation_free(mInitCompilation);
        ANeuralNetworksModel_free(mInitModel);
        ANeuralNetworksCompilation_free(mDeinitCompilation);
        ANeuralNetworksModel_free(mDeinitModel);

        ValidationTest::TearDown();
    }

    void addScalarOperand(ANeuralNetworksModel* model) {
        ANeuralNetworksOperandType operandType = {
                .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
        EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &operandType), ANEURALNETWORKS_NO_ERROR);
    }

    void addTensorOperand(ANeuralNetworksModel* model, bool dimensionsUnspecified) {
        uint32_t dimension = dimensionsUnspecified ? 0 : 1;
        ANeuralNetworksOperandType operandType = {
                .type = ANEURALNETWORKS_TENSOR_FLOAT32,
                .dimensionCount = 1,
                .dimensions = &dimension,
        };
        EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &operandType), ANEURALNETWORKS_NO_ERROR);
    }

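    // Builds a small ADD model where operand 1 is a constant tensor and operand 2 is the fused
    // activation code. When isValid is false the activation is set to 999, an out-of-range value;
    // model and compilation finish still succeed here, and the bad value is intended to surface
    // only when the compilation is actually executed.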
    void createModel(ANeuralNetworksModel* model, bool dimensionsUnspecified, bool isValid) {
        const float constData = 0;
        const uint32_t actData = isValid ? 0 : 999;

        addTensorOperand(model, dimensionsUnspecified);
        addTensorOperand(model, /*dimensionsUnspecified=*/false);
        addScalarOperand(model);
        addTensorOperand(model, dimensionsUnspecified);

        ASSERT_EQ(ANeuralNetworksModel_setOperandValue(model, 1, &constData, sizeof(float)),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksModel_setOperandValue(model, 2, &actData, sizeof(uint32_t)),
                  ANEURALNETWORKS_NO_ERROR);

        uint32_t inList[] = {0, 1, 2}, outList[] = {3};
        ASSERT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, inList, 1,
                                                    outList),
                  ANEURALNETWORKS_NO_ERROR);
        uint32_t inputList[] = {0}, outputList[] = {3};
        ASSERT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, 1, inputList, 1, outputList),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
    }

    void executeWithMemoryAsInput(ANeuralNetworksCompilation* compilation,
                                  ANeuralNetworksMemory* memory, int expectedResult) {
        float data = 0;
        ANeuralNetworksExecution* execution = nullptr;
        ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, 0),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &data, sizeof(float)),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_compute(execution), expectedResult);
        ANeuralNetworksExecution_free(execution);
    }

    void executeWithMemoryAsOutput(ANeuralNetworksCompilation* compilation,
                                   ANeuralNetworksMemory* memory, int expectedResult) {
        const float data = 0;
        ANeuralNetworksExecution* execution = nullptr;
        ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &data, sizeof(float)),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 0),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_compute(execution), expectedResult);
        ANeuralNetworksExecution_free(execution);
    }

    ANeuralNetworksModel* mModel = nullptr;
    ANeuralNetworksCompilation* mCompilation = nullptr;
    ANeuralNetworksExecution* mExecution = nullptr;

    ANeuralNetworksModel* mModelDynamic = nullptr;
    ANeuralNetworksCompilation* mCompilationDynamic = nullptr;
    ANeuralNetworksExecution* mExecutionDynamic = nullptr;

    ANeuralNetworksModel* mInitModel = nullptr;
    ANeuralNetworksCompilation* mInitCompilation = nullptr;
    ANeuralNetworksModel* mDeinitModel = nullptr;
    ANeuralNetworksCompilation* mDeinitCompilation = nullptr;
};

TEST_F(ValidationTest, CreateModel) {
    EXPECT_EQ(ANeuralNetworksModel_create(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
}

TEST_F(ValidationTestModel, AddOperand) {
    ANeuralNetworksOperandType floatType{
            .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(nullptr, &floatType),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);

    ANeuralNetworksOperandType quant8TypeInvalidScale{
            .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
            .dimensionCount = 0,
            .dimensions = nullptr,
            // Scale has to be non-negative
            .scale = -1.0f,
            .zeroPoint = 0,
    };
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &quant8TypeInvalidScale),
              ANEURALNETWORKS_BAD_DATA);

    ANeuralNetworksOperandType quant8TypeInvalidZeroPoint{
            .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
            .dimensionCount = 0,
            .dimensions = nullptr,
            .scale = 1.0f,
            // zeroPoint has to be in [0, 255]
            .zeroPoint = -1,
    };
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &quant8TypeInvalidZeroPoint),
              ANEURALNETWORKS_BAD_DATA);

    const uint32_t dim = 2;
    ANeuralNetworksOperandType invalidScalarType{
            .type = ANEURALNETWORKS_INT32,
            // a scalar type must have 0 dimensions.
            .dimensionCount = 1,
            .dimensions = &dim,
    };
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &invalidScalarType),
              ANEURALNETWORKS_BAD_DATA);

    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &kInvalidTensorType1),
              ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &kInvalidTensorType2),
              ANEURALNETWORKS_BAD_DATA);

    modelFinish();
    // This should fail, as the model is already finished.
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, SetOperandSymmPerChannelQuantParams) {
    const int32_t operandIndex = addTensorOperand(ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL);

    float scales[2] = {1.0, 2.0};
    ANeuralNetworksSymmPerChannelQuantParams channelQuant = {
            .channelDim = 0,
            .scaleCount = 2,
            .scales = scales,
    };

    EXPECT_EQ(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(nullptr, operandIndex,
                                                                       &channelQuant),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(
            ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(mModel, operandIndex, nullptr),
            ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(mModel, operandIndex + 1,
                                                                       &channelQuant),
              ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(mModel, operandIndex,
                                                                       &channelQuant),
              ANEURALNETWORKS_NO_ERROR);
}

#ifndef NNTEST_ONLY_PUBLIC_API
TEST_F(ValidationTestModelExtensions, AddOperand_UnknownPrefix) {
    ANeuralNetworksOperandType type = {.type = -1};
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &type), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModelExtensions, SetOperandSymmPerChannelQuantParams_ExtensionOperand) {
    const int32_t operandIndex =
            addTensorOperand(getExtensionOperandType(kTestExtensionTensorType));

    float scales[2] = {1.0, 2.0};
    ANeuralNetworksSymmPerChannelQuantParams channelQuant = {
            .channelDim = 0,
            .scaleCount = 2,
            .scales = scales,
    };

    EXPECT_EQ(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(mModel, operandIndex,
                                                                       &channelQuant),
              ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModelExtensions, SetOperandExtensionData) {
    const int32_t operandIndex =
            addTensorOperand(getExtensionOperandType(kTestExtensionTensorType));
    const int32_t data = 42;
    const size_t dataLength = sizeof(data);
    EXPECT_EQ(
            ANeuralNetworksModel_setOperandExtensionData(nullptr, operandIndex, &data, dataLength),
            ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(
            ANeuralNetworksModel_setOperandExtensionData(mModel, operandIndex, nullptr, dataLength),
            ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_setOperandExtensionData(mModel, operandIndex, &data, 0),
              ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksModel_setOperandExtensionData(mModel, operandIndex + 1, &data,
                                                           dataLength),
              ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksModel_setOperandExtensionData(mModel, operandIndex, &data, dataLength),
              ANEURALNETWORKS_NO_ERROR);
}

TEST_F(ValidationTestModelExtensions, SetOperandExtensionData_Empty) {
    const int32_t operandIndex =
            addTensorOperand(getExtensionOperandType(kTestExtensionTensorType));
    EXPECT_EQ(ANeuralNetworksModel_setOperandExtensionData(mModel, operandIndex, nullptr, 0),
              ANEURALNETWORKS_NO_ERROR);
}

TEST_F(ValidationTestModelExtensions, SetOperandExtensionData_NonExtensionOperand) {
    const int32_t operandIndex = addTensorOperand();
    const int32_t data = 42;
    const size_t dataLength = sizeof(data);
    EXPECT_EQ(ANeuralNetworksModel_setOperandExtensionData(mModel, operandIndex, &data, dataLength),
              ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModelExtensions, SetOperandValue_UnspecifiedDimension) {
    const uint32_t dimensions[2] = {3, 0};
    ANeuralNetworksOperandType type = {
            .type = getExtensionOperandType(kTestExtensionTensorType),
            .dimensionCount = 2,
            .dimensions = dimensions,
    };
    const int32_t operandIndex = addOperand(type);
    char buffer[20];
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, operandIndex, buffer, sizeof(buffer)),
              ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModelExtensions, SetOperandValue_UnspecifiedRank) {
    ANeuralNetworksOperandType type = {
            .type = getExtensionOperandType(kTestExtensionTensorType),
            .dimensionCount = 0,
            .dimensions = nullptr,
    };
    const int32_t operandIndex = addOperand(type);
    char buffer[20];
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, operandIndex, buffer, sizeof(buffer)),
              ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModelExtensions, AddOperandDimensionProductOverflow) {
    uint32_t dimensions[] = {5, 4, 4, 786433, 5, 3, 16777216, 4, 5};
    ANeuralNetworksOperandType operandType = {
            .type = getExtensionOperandType(kTestExtensionTensorType),
            .dimensionCount = std::size(dimensions),
            .dimensions = dimensions,
    };
    // This should fail, as the operand type's dimension product overflows uint32_t.
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &operandType), ANEURALNETWORKS_BAD_DATA);
}
#endif

TEST_F(ValidationTestModel, SetOptionalOperand) {
    ANeuralNetworksOperandType floatType{
            .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, nullptr, 0),
              ANEURALNETWORKS_NO_ERROR);
}

TEST_F(ValidationTestModel, SetOperandValue) {
    ANeuralNetworksOperandType floatType{
            .type = ANEURALNETWORKS_FLOAT32, .dimensionCount = 0, .dimensions = nullptr};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);

    char buffer[20];
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(nullptr, 0, buffer, sizeof(buffer)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, nullptr, sizeof(buffer)),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    // This should fail, because buffer is not the size of a float32.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(buffer)),
              ANEURALNETWORKS_BAD_DATA);

    // This should succeed.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(float)),
              ANEURALNETWORKS_NO_ERROR);

    // This should fail, as this operand does not exist.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 1, buffer, sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    modelFinish();
    // This should fail, as the model is already finished.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(float)),
              ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, SetOperandValueFromMemory) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType floatType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);

    const size_t memorySize = 20;
#ifdef __ANDROID__
    int memoryFd = ASharedMemory_create("nnMemory", memorySize);
#else  // __ANDROID__
    TemporaryFile tmpFile;
    int memoryFd = tmpFile.release();
    CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
#endif  // __ANDROID__
    ASSERT_GT(memoryFd, 0);

    ANeuralNetworksMemory* memory;
    EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
                                                 &memory),
              ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(nullptr, 0, memory, 0, sizeof(float)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, nullptr, 0, sizeof(float)),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    // This should fail, because the operand does not exist.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, -1, memory, 0, sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    // This should fail, because memory is not the size of a float32.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, memorySize),
              ANEURALNETWORKS_BAD_DATA);

    // This should fail, as this operand does not exist.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 1, memory, 0, sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    // This should fail, because offset is larger than memorySize.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize + 1,
                                                             sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    // This should fail, because requested size is larger than the memory.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize - 3,
                                                             sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    modelFinish();
    // This should fail, as the model is already finished.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, sizeof(float)),
              ANEURALNETWORKS_BAD_STATE);

    // close memory
    ANeuralNetworksMemory_free(memory);
    close(memoryFd);
}

#ifdef __ANDROID__
TEST_F(ValidationTestModel, SetOperandValueFromAHardwareBuffer) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType quant8Type{.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
                                          .dimensionCount = 1,
                                          .dimensions = dimensions,
                                          .scale = 1.0,
                                          .zeroPoint = 0};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &quant8Type), ANEURALNETWORKS_NO_ERROR);

    AHardwareBuffer_Desc desc{
            .width = 16,
            .height = 16,
            .layers = 1,
            .format = AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM,
            .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
    };

    AHardwareBuffer* buffer = nullptr;
    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);

    ANeuralNetworksMemory* memory;
    EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory),
              ANEURALNETWORKS_NO_ERROR);

    // This should fail, because non-BLOB AHardwareBuffer is not allowed.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, 0, sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);

    // close memory
    ANeuralNetworksMemory_free(memory);
    AHardwareBuffer_release(buffer);
}

TEST_F(ValidationTestModel, SetOperandValueFromAHardwareBufferBlob) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType floatType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &floatType), ANEURALNETWORKS_NO_ERROR);

    const size_t memorySize = 20;
    AHardwareBuffer_Desc desc{
            .width = memorySize,
            .height = 1,
            .layers = 1,
            .format = AHARDWAREBUFFER_FORMAT_BLOB,
            .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
    };

    AHardwareBuffer* buffer = nullptr;
    ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);

    ANeuralNetworksMemory* memory;
    EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory),
              ANEURALNETWORKS_NO_ERROR);

    // This should fail, because offset is larger than memorySize.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize + 1,
                                                             sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    // This should fail, because requested size is larger than the memory.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromMemory(mModel, 0, memory, memorySize - 3,
                                                             sizeof(float)),
              ANEURALNETWORKS_BAD_DATA);

    // close memory
    ANeuralNetworksMemory_free(memory);
    AHardwareBuffer_release(buffer);
}
#endif  // __ANDROID__

TEST_F(ValidationTestModel, SetOperandValueFromModel) {
    uint32_t dimensions[] = {2};
    ANeuralNetworksOperandType tensorType = {
            .type = ANEURALNETWORKS_TENSOR_FLOAT32,
            .dimensionCount = std::size(dimensions),
            .dimensions = dimensions,
    };
    ANeuralNetworksOperandType scalarType = {.type = ANEURALNETWORKS_INT32};
    ANeuralNetworksOperandType modelType = {.type = ANEURALNETWORKS_MODEL};

    ANeuralNetworksModel* valueModel = nullptr;
    ASSERT_EQ(ANeuralNetworksModel_create(&valueModel), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(valueModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(valueModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(valueModel, &scalarType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(valueModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    uint32_t inList[3] = {0, 1, 2};
    uint32_t outList[1] = {3};
    ASSERT_EQ(ANeuralNetworksModel_addOperation(valueModel, ANEURALNETWORKS_ADD, 3, inList, 1,
                                                outList),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(valueModel, 3, inList, 1, outList),
              ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &modelType), ANEURALNETWORKS_NO_ERROR);

    // This should fail, as the value model is not finished.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, 0, valueModel),
              ANEURALNETWORKS_BAD_STATE);
    ANeuralNetworksModel_finish(valueModel);

    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(nullptr, 0, valueModel),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, 0, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    // This should fail, because the operand does not exist.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, -1, valueModel),
              ANEURALNETWORKS_BAD_DATA);

    // This should fail, as this operand does not exist.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, 1, valueModel),
              ANEURALNETWORKS_BAD_DATA);

    modelFinish();
    // This should fail, as the model is already finished.
    EXPECT_EQ(ANeuralNetworksModel_setOperandValueFromModel(mModel, 0, valueModel),
              ANEURALNETWORKS_BAD_STATE);

    ANeuralNetworksModel_free(valueModel);
}

TEST_F(ValidationTestModel, AddOEMOperand) {
    ANeuralNetworksOperandType OEMScalarType{
            .type = ANEURALNETWORKS_OEM_SCALAR, .dimensionCount = 0, .dimensions = nullptr};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMScalarType), ANEURALNETWORKS_NO_ERROR);
    char buffer[20];
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 0, buffer, sizeof(buffer)),
              ANEURALNETWORKS_NO_ERROR);

    const size_t kByteSizeOfOEMTensor = 4;
    uint32_t dimensions[]{kByteSizeOfOEMTensor};
    ANeuralNetworksOperandType OEMTensorType{
            .type = ANEURALNETWORKS_TENSOR_OEM_BYTE, .dimensionCount = 1, .dimensions = dimensions};
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMTensorType), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 1, buffer, kByteSizeOfOEMTensor),
              ANEURALNETWORKS_NO_ERROR);

    modelFinish();
    // This should fail, as the model is already finished.
    EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMTensorType), ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, AddOperation) {
    uint32_t input = 0;
    uint32_t output = 0;
    EXPECT_EQ(ANeuralNetworksModel_addOperation(nullptr, ANEURALNETWORKS_AVERAGE_POOL_2D, 1, &input,
                                                1, &output),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, 0, nullptr,
                                                1, &output),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_addOperation(mModel, ANEURALNETWORKS_AVERAGE_POOL_2D, 1, &input,
                                                0, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    ANeuralNetworksOperationType invalidOp = -1;
    EXPECT_EQ(addOperation(invalidOp, {input}, {output}), ANEURALNETWORKS_BAD_DATA);

    modelFinish();
    // This should fail, as the model is already finished.
    EXPECT_EQ(addOperation(ANEURALNETWORKS_AVERAGE_POOL_2D, {input}, {output}),
              ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, IdentifyInputsAndOutputs) {
    uint32_t input = 0;
    uint32_t output = 0;
    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(nullptr, 1, &input, 1, &output),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(mModel, 0, nullptr, 1, &output),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(mModel, 1, &input, 0, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    createModel();
    // This should fail, as the model is already finished.
    EXPECT_EQ(identifyInputsAndOutputs({input}, {output}), ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, RelaxComputationFloat32toFloat16) {
    EXPECT_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16(nullptr, true),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    createModel();
    // This should fail, as the model is already finished.
    EXPECT_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, true),
              ANEURALNETWORKS_BAD_STATE);
    EXPECT_EQ(ANeuralNetworksModel_relaxComputationFloat32toFloat16(mModel, false),
              ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, Finish) {
    EXPECT_EQ(ANeuralNetworksModel_finish(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
    createModel();
    EXPECT_EQ(modelFinish(), ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, EmptyModel) {
    // An empty model is invalid
    EXPECT_EQ(modelFinish(), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModel, CreateCompilation) {
    ANeuralNetworksCompilation* compilation = nullptr;
    EXPECT_EQ(ANeuralNetworksCompilation_create(nullptr, &compilation),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_create(mModel, &compilation), ANEURALNETWORKS_BAD_STATE);
}

TEST_F(ValidationTestModel, CreateCompilationForDevices) {
    createModel();
    uint32_t numDevices = 0;
    EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);

    if (numDevices > 0) {
        ANeuralNetworksDevice* device;
        EXPECT_EQ(ANeuralNetworks_getDevice(0, &device), ANEURALNETWORKS_NO_ERROR);
        ANeuralNetworksCompilation* compilation = nullptr;
        EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(nullptr, &device, 1, &compilation),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, &device, 1, nullptr),
                  ANEURALNETWORKS_UNEXPECTED_NULL);

        // empty device list
        EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, &device, 0, &compilation),
                  ANEURALNETWORKS_BAD_DATA);

        // duplicate devices in the list.
        ANeuralNetworksDevice* invalidDevices[2] = {device, device};
        EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, invalidDevices, 2,
                                                              &compilation),
                  ANEURALNETWORKS_BAD_DATA);
        // nullptr in the list.
        invalidDevices[1] = nullptr;
        EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, invalidDevices, 2,
                                                              &compilation),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
    }

    ANeuralNetworksCompilation* compilation = nullptr;
    EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(nullptr, nullptr, 1, &compilation),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, nullptr, 1, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, nullptr, 1, &compilation),
              ANEURALNETWORKS_UNEXPECTED_NULL);
}

TEST_F(ValidationTestModel, GetSupportedOperationsForDevices) {
    createModel();
    uint32_t numDevices = 0;
    EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);

    bool supportedOps[20];
    ASSERT_LE(mNumOperations, sizeof(supportedOps) / sizeof(supportedOps[0]));
    if (numDevices > 0) {
        ANeuralNetworksDevice* device;
        EXPECT_EQ(ANeuralNetworks_getDevice(0, &device), ANEURALNETWORKS_NO_ERROR);
        EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(nullptr, &device, 1,
                                                                        supportedOps),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(
                ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, &device, 1, nullptr),
                ANEURALNETWORKS_UNEXPECTED_NULL);

        // empty device list
        EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, &device, 0,
                                                                        supportedOps),
                  ANEURALNETWORKS_BAD_DATA);

        // duplicate devices in the list.
        ANeuralNetworksDevice* invalidDevices[2] = {device, device};
        EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, invalidDevices, 2,
                                                                        supportedOps),
                  ANEURALNETWORKS_BAD_DATA);
        // nullptr in the list.
        invalidDevices[1] = nullptr;
        EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, invalidDevices, 2,
                                                                        supportedOps),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
    }

    EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(nullptr, nullptr, 1,
                                                                    supportedOps),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, nullptr, 1, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(
            ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, nullptr, 1, supportedOps),
            ANEURALNETWORKS_UNEXPECTED_NULL);
}

TEST_F(ValidationTestModel, Cycle) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType tensorType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};
    ANeuralNetworksOperandType scalarType{
            .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};

    // opnd0 = model input TENSOR_FLOAT32
    // opnd1 = model input TENSOR_FLOAT32
    // opnd2 = model input INT32
    // opnd3 = ADD(opnd0, opnd4, opnd2)
    // opnd4 = ADD(opnd1, opnd3, opnd2)
    // opnd5 = ADD(opnd4, opnd0, opnd2)  // model output
    //
    //             +-----+
    //             |     |
    //             v     |
    //  3 = ADD(0, 4, 2) |
    //  |                |
    //  +----------+     |
    //             |     |
    //             v     |
    //  4 = ADD(1, 3, 2) |
    //  |                |
    //  +----------------+
    //  |
    //  |
    //  +-------+
    //          |
    //          v
    //  5 = ADD(4, 0, 2)

    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &scalarType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(addOperation(ANEURALNETWORKS_ADD, {0, 4, 2}, {3}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(addOperation(ANEURALNETWORKS_ADD, {1, 3, 2}, {4}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(addOperation(ANEURALNETWORKS_ADD, {4, 0, 2}, {5}), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2}, {5}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(modelFinish(), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModel, AcyclicReadBeforeWrite) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType tensorType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};

    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = LOGISTIC(opnd2)           // model output
    // opnd2 = LOGISTIC(opnd0)
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(addOperation(ANEURALNETWORKS_LOGISTIC, {2}, {1}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(addOperation(ANEURALNETWORKS_LOGISTIC, {0}, {2}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(identifyInputsAndOutputs({0}, {1}), ANEURALNETWORKS_NO_ERROR);

    // This should succeed, because NN API doesn't require that operations be sorted.
    ASSERT_EQ(modelFinish(), ANEURALNETWORKS_NO_ERROR);
}

TEST_F(ValidationTestModel, MissingWrite) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType tensorType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};

    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = TENSOR_FLOAT32            // never written
    // opnd2 = LOGISTIC(opnd1)           // model output
    // opnd3 = LOGISTIC(opnd0)           // model output
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(addOperation(ANEURALNETWORKS_LOGISTIC, {1}, {2}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(addOperation(ANEURALNETWORKS_LOGISTIC, {0}, {3}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(identifyInputsAndOutputs({0}, {2, 3}), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(modelFinish(), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModel, UnwrittenOperand) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType tensorType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};

    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = TENSOR_FLOAT32            // never written
    // opnd2 = LOGISTIC(opnd0)           // model output
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(addOperation(ANEURALNETWORKS_LOGISTIC, {0}, {2}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(identifyInputsAndOutputs({0}, {2}), ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(modelFinish(), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestModel, MultipleWrite) {
    uint32_t dimensions[]{1};
    ANeuralNetworksOperandType tensorType{
            .type = ANEURALNETWORKS_TENSOR_FLOAT32, .dimensionCount = 1, .dimensions = dimensions};
    ANeuralNetworksOperandType scalarType{
            .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};

    // opnd0 = TENSOR_FLOAT32            // model input
    // opnd1 = INT32                     // model input
    // opnd2 = ADD(opnd0, opnd0, opnd1)  // model output; do this twice
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &scalarType), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &tensorType), ANEURALNETWORKS_NO_ERROR);

    for (int i = 0; i < 2; ++i) {
        SCOPED_TRACE(i);
        ASSERT_EQ(addOperation(ANEURALNETWORKS_ADD, {0, 0, 1}, {2}), ANEURALNETWORKS_NO_ERROR);
    }

    ASSERT_EQ(identifyInputsAndOutputs({0, 1}, {2}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(modelFinish(), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestIdentify, Ok) {
    ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2}, {3}), ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(modelFinish(), ANEURALNETWORKS_NO_ERROR);
}

TEST_F(ValidationTestIdentify, InputIsOutput) {
    ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2}, {3, 0}), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestIdentify, OutputIsInput) {
    ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2, 3}, {3}), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestIdentify, DuplicateInputs) {
    ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2, 0}, {3}), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestIdentify, DuplicateOutputs) {
    ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2}, {3, 3}), ANEURALNETWORKS_BAD_DATA);
}

// Also see TEST_F(ValidationTestCompilationForDevices_1, SetPreference)
TEST_F(ValidationTestCompilation, SetPreference) {
    EXPECT_EQ(ANeuralNetworksCompilation_setPreference(nullptr, ANEURALNETWORKS_PREFER_LOW_POWER),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation, 40), ANEURALNETWORKS_BAD_DATA);
}

// Also see TEST_F(ValidationTestCompilationForDevices_1, SetCaching)
TEST_F(ValidationTestCompilation, SetCaching) {
    std::vector<uint8_t> token(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
    EXPECT_EQ(ANeuralNetworksCompilation_setCaching(nullptr, NN_TMP_DIR, token.data()),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_setCaching(mCompilation, nullptr, token.data()),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_setCaching(mCompilation, NN_TMP_DIR, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);
}

TEST_F(ValidationTestCompilation, SetPriority) {
    EXPECT_EQ(ANeuralNetworksCompilation_setPriority(nullptr, ANEURALNETWORKS_PRIORITY_DEFAULT),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    // Test invalid values of priority.
    constexpr int kInvalidPriorities[] = {0,
                                          ANEURALNETWORKS_PRIORITY_LOW - 1,
                                          ANEURALNETWORKS_PRIORITY_LOW + 1,
                                          ANEURALNETWORKS_PRIORITY_MEDIUM - 1,
                                          ANEURALNETWORKS_PRIORITY_MEDIUM + 1,
                                          ANEURALNETWORKS_PRIORITY_HIGH - 1,
                                          ANEURALNETWORKS_PRIORITY_HIGH + 1};
    for (int invalidPriority : kInvalidPriorities) {
        EXPECT_EQ(ANeuralNetworksCompilation_setPriority(mCompilation, invalidPriority),
                  ANEURALNETWORKS_BAD_DATA);
    }
}

// Also see TEST_F(ValidationTestCompilationForDevices_1, SetTimeout)
// Also see TEST_F(ValidationTestCompilationForDevices_2, SetTimeout)
TEST_F(ValidationTestCompilation, SetTimeout) {
    EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(nullptr, kShortWaitInNanoseconds),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    // Timeout can only be set on Compilations created from CompilationForDevices with one device
    // specified.
    EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(mCompilation, kShortWaitInNanoseconds),
              ANEURALNETWORKS_BAD_DATA);
}

TEST_F(ValidationTestCompilation, GetPreferredMemoryAlignmentAndPadding) {
    if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
        uint32_t result;

        // The following calls should fail, because the compilation has not been finished.
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation, 0,
                                                                                 &result),
                  ANEURALNETWORKS_BAD_STATE);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation, 0,
                                                                               &result),
                  ANEURALNETWORKS_BAD_STATE);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mCompilation, 0,
                                                                                  &result),
                  ANEURALNETWORKS_BAD_STATE);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation, 0,
                                                                                &result),
                  ANEURALNETWORKS_BAD_STATE);

        EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);

        // The following calls should fail because of unexpected nullptr.
        EXPECT_EQ(
                ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(nullptr, 0, &result),
                ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(mCompilation, 0,
                                                                                 nullptr),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(nullptr, 0, &result),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mCompilation, 0,
                                                                               nullptr),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(nullptr, 0,
                                                                                  &result),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(mCompilation, 0,
                                                                                  nullptr),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(
                ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(nullptr, 0, &result),
                ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mCompilation, 0,
                                                                                nullptr),
                  ANEURALNETWORKS_UNEXPECTED_NULL);

        // The following calls should fail, because the index is out of range.
        const uint32_t invalidIndex = 1000;
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
                          mCompilation, invalidIndex, &result),
                  ANEURALNETWORKS_BAD_DATA);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(
                          mCompilation, invalidIndex, &result),
                  ANEURALNETWORKS_BAD_DATA);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
                          mCompilation, invalidIndex, &result),
                  ANEURALNETWORKS_BAD_DATA);
        EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(
                          mCompilation, invalidIndex, &result),
                  ANEURALNETWORKS_BAD_DATA);

    } else {
        GTEST_SKIP();
    }
}

1235 // Also see TEST_F(ValidationTestCompilationForDevices_1, CreateExecution)
TEST_F(ValidationTestCompilation,CreateExecution)1236 TEST_F(ValidationTestCompilation, CreateExecution) {
1237 ANeuralNetworksExecution* execution = nullptr;
1238 EXPECT_EQ(ANeuralNetworksExecution_create(nullptr, &execution),
1239 ANEURALNETWORKS_UNEXPECTED_NULL);
1240 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, nullptr),
1241 ANEURALNETWORKS_UNEXPECTED_NULL);
1242 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_BAD_STATE);
1243 }
1244
1245 // Also see TEST_F(ValidationTestCompilationForDevices_1, Finish)
TEST_F(ValidationTestCompilation,Finish)1246 TEST_F(ValidationTestCompilation, Finish) {
1247 EXPECT_EQ(ANeuralNetworksCompilation_finish(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
1248 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
1249 EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation,
1250 ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER),
1251 ANEURALNETWORKS_BAD_STATE);
1252 EXPECT_EQ(
1253 ANeuralNetworksCompilation_setPriority(mCompilation, ANEURALNETWORKS_PRIORITY_DEFAULT),
1254 ANEURALNETWORKS_BAD_STATE);
1255 EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(mCompilation, kShortWaitInNanoseconds),
1256 ANEURALNETWORKS_BAD_STATE);
1257 std::vector<uint8_t> token(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
1258 EXPECT_EQ(ANeuralNetworksCompilation_setCaching(mCompilation, NN_TMP_DIR, token.data()),
1259 ANEURALNETWORKS_BAD_STATE);
1260 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_STATE);
1261 }
1262
1263 // Also see TEST_F(ValidationTestCompilationForDevices_1, ExecutionSetTimeout)
1264 // Also see TEST_F(ValidationTestCompilationForDevices_2, ExecutionSetTimeout)
TEST_F(ValidationTestCompilation,ExecutionSetTimeout)1265 TEST_F(ValidationTestCompilation, ExecutionSetTimeout) {
1266 EXPECT_EQ(ANeuralNetworksExecution_setTimeout(nullptr, kShortWaitInNanoseconds),
1267 ANEURALNETWORKS_UNEXPECTED_NULL);
1268
1269 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
1270 ANeuralNetworksExecution* execution;
1271 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
1272 // Timeout can only be set on Compilations created from CompilationForDevices with one device
1273 // specified.
1274 EXPECT_EQ(ANeuralNetworksExecution_setTimeout(execution, kShortWaitInNanoseconds),
1275 ANEURALNETWORKS_BAD_DATA);
1276 ANeuralNetworksExecution_free(execution);
1277 }
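
// A minimal sketch (not part of the tests above, and not exercised by them) of how a compilation
// that does accept ANeuralNetworksExecution_setTimeout could be obtained: the compilation must be
// created for exactly one device with ANeuralNetworksCompilation_createForDevices. Error handling
// is omitted and the helper name is illustrative only.
[[maybe_unused]] static ANeuralNetworksCompilation* createSingleDeviceCompilation(
        ANeuralNetworksModel* model) {
    uint32_t numDevices = 0;
    ANeuralNetworks_getDeviceCount(&numDevices);
    if (numDevices == 0) return nullptr;
    ANeuralNetworksDevice* device = nullptr;
    ANeuralNetworks_getDevice(0, &device);
    ANeuralNetworksCompilation* compilation = nullptr;
    // Exactly one device, so timeout and timing APIs become available on its executions.
    ANeuralNetworksCompilation_createForDevices(model, &device, /*numDevices=*/1, &compilation);
    return compilation;
}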
1278
1279 // Also see TEST_F(ValidationTestCompilationForDevices_1, ExecutionTiming)
1280 // Also see TEST_F(ValidationTestCompilationForDevices_2, ExecutionTiming)
1281 TEST_F(ValidationTestCompilation, ExecutionTiming) {
1282 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
1283 ANeuralNetworksExecution* execution;
1284 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
1285 // setMeasureTiming() requires an execution created from CompilationForDevices, not from a plain Compilation.
1286 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, false),
1287 ANEURALNETWORKS_BAD_DATA);
1288 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, true), ANEURALNETWORKS_BAD_DATA);
1289
1290 // Free the execution.
1291 ANeuralNetworksExecution_free(execution);
1292 }
1293
1294 // Also see TEST_F(ValidationTestCompilationForDevices_1, ExecutionTiming)
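// Verifies that once a computation has been scheduled on an execution (async, sync, burst, or
// fenced), any further configuration or reuse of that execution fails with ANEURALNETWORKS_BAD_STATE.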
1295 TEST_F(ValidationTestCompilation, ExecutionUsability) {
1296 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
1297
1298 enum class ExecutionType : uint32_t { ASYNC, SYNC, BURST, FENCED };
1299 for (auto executionType :
1300 {ExecutionType::ASYNC, ExecutionType::SYNC, ExecutionType::BURST, ExecutionType::FENCED}) {
1301 for (bool explicitlyDisableReusability : {false, true}) {
1302 SCOPED_TRACE(static_cast<uint32_t>(executionType));
1303 SCOPED_TRACE(explicitlyDisableReusability);
1304
1305 ANeuralNetworksExecution* execution;
1306 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
1307 ANEURALNETWORKS_NO_ERROR);
1308
1309 if (explicitlyDisableReusability) {
1310 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1311 ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, false),
1312 ANEURALNETWORKS_NO_ERROR);
1313 } else {
1314 ANeuralNetworksExecution_free(execution);
1315 continue;
1316 }
1317 }
1318
1319 // Set inputs and outputs.
1320 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];
1321 int in2 = 0;
1322 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),
1323 ANEURALNETWORKS_NO_ERROR);
1324 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)),
1325 ANEURALNETWORKS_NO_ERROR);
1326 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)),
1327 ANEURALNETWORKS_NO_ERROR);
1328 ASSERT_EQ(
1329 ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)),
1330 ANEURALNETWORKS_NO_ERROR);
1331
1332 const size_t memorySize = std::max(sizeof(in0), sizeof(out0));
1333 #ifdef __ANDROID__
1334 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
1335 #else // __ANDROID__
1336 TemporaryFile tmpFile;
1337 int memoryFd = tmpFile.release();
1338 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
1339 #endif // __ANDROID__
1340 ASSERT_GT(memoryFd, 0);
1341 ANeuralNetworksMemory* memory;
1342 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE,
1343 memoryFd, 0, &memory),
1344 ANEURALNETWORKS_NO_ERROR);
1345
1346 auto testTooLate = [this, execution, &in0, &out0, memory] {
1347 // Try a bunch of things that are impermissible if the execution has started.
1348
1349 // Set loop timeout.
1350 ASSERT_EQ(
1351 ANeuralNetworksExecution_setLoopTimeout(execution, kShortWaitInNanoseconds),
1352 ANEURALNETWORKS_BAD_STATE);
1353
1354 // Enable/Disable input and output padding.
1355 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1356 ASSERT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true),
1357 ANEURALNETWORKS_BAD_STATE);
1358 ASSERT_EQ(
1359 ANeuralNetworksExecution_enableInputAndOutputPadding(execution, false),
1360 ANEURALNETWORKS_BAD_STATE);
1361 }
1362
1363 // Set inputs and outputs.
1364 ASSERT_EQ(
1365 ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),
1366 ANEURALNETWORKS_BAD_STATE);
1367 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0,
1368 sizeof(out0)),
1369 ANEURALNETWORKS_BAD_STATE);
1370 ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory,
1371 0, sizeof(in0)),
1372 ANEURALNETWORKS_BAD_STATE);
1373 ASSERT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr,
1374 memory, 0, sizeof(out0)),
1375 ANEURALNETWORKS_BAD_STATE);
1376
1377 // Set reusable.
1378 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1379 ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, true),
1380 ANEURALNETWORKS_BAD_STATE);
1381 ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, false),
1382 ANEURALNETWORKS_BAD_STATE);
1383 }
1384
1385 // Reuse for asynchronous execution.
1386 {
1387 ANeuralNetworksEvent* event;
1388 ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event),
1389 ANEURALNETWORKS_BAD_STATE);
1390 }
1391
1392 // Reuse for synchronous execution.
1393 ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_BAD_STATE);
1394
1395 // Reuse for burst execution.
1396 {
1397 ANeuralNetworksBurst* burst;
1398 ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst),
1399 ANEURALNETWORKS_NO_ERROR);
1400 ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst),
1401 ANEURALNETWORKS_BAD_STATE);
1402 ANeuralNetworksBurst_free(burst);
1403 }
1404
1405 // Reuse for fenced execution.
1406 {
1407 ANeuralNetworksEvent* event;
1408 ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(
1409 execution, nullptr, 0, 0, &event),
1410 ANEURALNETWORKS_BAD_STATE);
1411 }
1412 };
1413
1414 // Compute.
1415 switch (executionType) {
1416 case ExecutionType::ASYNC: {
1417 ANeuralNetworksEvent* event;
1418 ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event),
1419 ANEURALNETWORKS_NO_ERROR);
1420 testTooLate();
1421 ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR);
1422 testTooLate();
1423 ANeuralNetworksEvent_free(event);
1424 break;
1425 }
1426 case ExecutionType::SYNC: {
1427 ASSERT_EQ(ANeuralNetworksExecution_compute(execution),
1428 ANEURALNETWORKS_NO_ERROR);
1429 testTooLate();
1430 break;
1431 }
1432 case ExecutionType::BURST: {
1433 ANeuralNetworksBurst* burst;
1434 ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst),
1435 ANEURALNETWORKS_NO_ERROR);
1436 ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst),
1437 ANEURALNETWORKS_NO_ERROR);
1438 testTooLate();
1439 ANeuralNetworksBurst_free(burst);
1440 break;
1441 }
1442 case ExecutionType::FENCED: {
1443 ANeuralNetworksEvent* event;
1444 ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(
1445 execution, nullptr, 0, 0, &event),
1446 ANEURALNETWORKS_NO_ERROR);
1447 testTooLate();
1448 ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR);
1449 testTooLate();
1450 ANeuralNetworksEvent_free(event);
1451 break;
1452 }
1453 default:
1454 FAIL() << "Unreachable";
1455 }
1456
1457 // close memory
1458 ANeuralNetworksExecution_free(execution);
1459 ANeuralNetworksMemory_free(memory);
1460 close(memoryFd);
1461 }
1462 }
1463 }
1464
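// Runs two computations concurrently on the same execution object and checks that the results
// are consistent with whether the execution was marked reusable.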
1465 static void testConcurrentExecution(bool reusable, ANeuralNetworksCompilation* compilation) {
1466 ASSERT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR);
1467
1468 enum class ExecutionType : uint32_t { ASYNC, SYNC, BURST, FENCED };
1469 const auto compute = [compilation](ExecutionType executionType,
1470 ANeuralNetworksExecution* execution) -> int {
1471 switch (executionType) {
1472 case ExecutionType::ASYNC: {
1473 ANeuralNetworksEvent* event;
1474 int result = ANeuralNetworksExecution_startCompute(execution, &event);
1475 if (result == ANEURALNETWORKS_NO_ERROR) {
1476 result = ANeuralNetworksEvent_wait(event);
1477 }
1478 ANeuralNetworksEvent_free(event);
1479 return result;
1480 }
1481 case ExecutionType::SYNC: {
1482 return ANeuralNetworksExecution_compute(execution);
1483 }
1484 case ExecutionType::BURST: {
1485 ANeuralNetworksBurst* burst;
1486 int result = ANeuralNetworksBurst_create(compilation, &burst);
1487 if (result == ANEURALNETWORKS_NO_ERROR) {
1488 result = ANeuralNetworksExecution_burstCompute(execution, burst);
1489 }
1490 ANeuralNetworksBurst_free(burst);
1491 return result;
1492 }
1493 case ExecutionType::FENCED: {
1494 ANeuralNetworksEvent* event;
1495 int result = ANeuralNetworksExecution_startComputeWithDependencies(
1496 execution, nullptr, 0, 0, &event);
1497 if (result == ANEURALNETWORKS_NO_ERROR) {
1498 result = ANeuralNetworksEvent_wait(event);
1499 }
1500 ANeuralNetworksEvent_free(event);
1501 return result;
1502 }
1503 }
1504 };
1505
1506 const std::vector<ExecutionType> kExecutionTypes = {
1507 ExecutionType::ASYNC, ExecutionType::SYNC, ExecutionType::BURST, ExecutionType::FENCED};
1508 for (auto executionType1 : kExecutionTypes) {
1509 for (auto executionType2 : kExecutionTypes) {
1510 SCOPED_TRACE(static_cast<uint32_t>(executionType1));
1511 SCOPED_TRACE(static_cast<uint32_t>(executionType2));
1512
1513 ANeuralNetworksExecution* execution;
1514 ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution),
1515 ANEURALNETWORKS_NO_ERROR);
1516
1517 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];
1518 int in2 = 0;
1519 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),
1520 ANEURALNETWORKS_NO_ERROR);
1521 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)),
1522 ANEURALNETWORKS_NO_ERROR);
1523 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)),
1524 ANEURALNETWORKS_NO_ERROR);
1525 ASSERT_EQ(
1526 ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)),
1527 ANEURALNETWORKS_NO_ERROR);
1528 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1529 ASSERT_EQ(ANeuralNetworksExecution_setReusable(execution, reusable),
1530 ANEURALNETWORKS_NO_ERROR);
1531 } else {
1532 if (reusable) {
1533 ANeuralNetworksExecution_free(execution);
1534 return;
1535 }
1536 }
1537
1538 // Compute on the same execution concurrently.
1539 auto first = std::async(std::launch::async, [compute, executionType1, execution] {
1540 return compute(executionType1, execution);
1541 });
1542 auto second = std::async(std::launch::async, [compute, executionType2, execution] {
1543 return compute(executionType2, execution);
1544 });
1545 const int result1 = first.get();
1546 const int result2 = second.get();
1547
1548 // At least one result must be ANEURALNETWORKS_NO_ERROR. One may return
1549 // ANEURALNETWORKS_BAD_STATE if the other is already executing.
1550 EXPECT_TRUE(result1 == ANEURALNETWORKS_BAD_STATE ||
1551 result1 == ANEURALNETWORKS_NO_ERROR);
1552 EXPECT_TRUE(result2 == ANEURALNETWORKS_BAD_STATE ||
1553 result2 == ANEURALNETWORKS_NO_ERROR);
1554 EXPECT_TRUE(result1 == ANEURALNETWORKS_NO_ERROR || result2 == ANEURALNETWORKS_NO_ERROR);
1555
1556 // If the execution is not reusable, one result must be ANEURALNETWORKS_BAD_STATE.
1557 if (!reusable) {
1558 EXPECT_TRUE(result1 == ANEURALNETWORKS_BAD_STATE ||
1559 result2 == ANEURALNETWORKS_BAD_STATE);
1560 }
1561
1562 ANeuralNetworksExecution_free(execution);
1563 }
1564 }
1565 }
1566
1567 // Also see TEST_F(ValidationTestBurst, BurstComputeConcurrent)
1568 TEST_F(ValidationTestCompilation, ReusableExecutionConcurrent) {
1569 testConcurrentExecution(/*reusable=*/true, mCompilation);
1570 }
1571 TEST_F(ValidationTestCompilation, NonReusableExecutionConcurrent) {
1572 testConcurrentExecution(/*reusable=*/false, mCompilation);
1573 }
1574
1575 TEST_F(ValidationTestExecution, SetLoopTimeout) {
1576 EXPECT_EQ(ANeuralNetworksExecution_setLoopTimeout(nullptr, kShortWaitInNanoseconds),
1577 ANEURALNETWORKS_UNEXPECTED_NULL);
1578 }
1579
1580 TEST_F(ValidationTestExecution, EnableInputAndOutputPadding) {
1581 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1582 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(nullptr, true),
1583 ANEURALNETWORKS_UNEXPECTED_NULL);
1584 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(nullptr, false),
1585 ANEURALNETWORKS_UNEXPECTED_NULL);
1586 } else {
1587 GTEST_SKIP();
1588 }
1589 }
1590
1591 TEST_F(ValidationTestExecution, ExecutionSetReusable) {
1592 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1593 EXPECT_EQ(ANeuralNetworksExecution_setReusable(nullptr, true),
1594 ANEURALNETWORKS_UNEXPECTED_NULL);
1595 EXPECT_EQ(ANeuralNetworksExecution_setReusable(nullptr, false),
1596 ANEURALNETWORKS_UNEXPECTED_NULL);
1597 } else {
1598 GTEST_SKIP();
1599 }
1600 }
1601
1602 TEST_F(ValidationTestExecution, SetInput) {
1603 char buffer[20];
1604 EXPECT_EQ(ANeuralNetworksExecution_setInput(nullptr, 0, nullptr, buffer, sizeof(float)),
1605 ANEURALNETWORKS_UNEXPECTED_NULL);
1606 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, nullptr, sizeof(float)),
1607 ANEURALNETWORKS_UNEXPECTED_NULL);
1608
1609 // This should fail, because the length does not match the size of the operand.
1610 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer, 20),
1611 ANEURALNETWORKS_BAD_DATA);
1612
1613 // This should fail, as this operand does not exist.
1614 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 999, nullptr, buffer, sizeof(float)),
1615 ANEURALNETWORKS_BAD_DATA);
1616
1617 // This should fail, as this operand does not exist.
1618 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, -1, nullptr, buffer, sizeof(float)),
1619 ANEURALNETWORKS_BAD_DATA);
1620
1621 // These should fail, because the tensor types are invalid.
1622 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, &kInvalidTensorType1, buffer,
1623 sizeof(float)),
1624 ANEURALNETWORKS_BAD_DATA);
1625 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, &kInvalidTensorType2, buffer,
1626 sizeof(float)),
1627 ANEURALNETWORKS_BAD_DATA);
1628
1629 // Cannot do this twice.
1630 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer, 8),
1631 ANEURALNETWORKS_NO_ERROR);
1632 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer, 8),
1633 ANEURALNETWORKS_BAD_STATE);
1634 }
1635
1636 TEST_F(ValidationTestExecution, SetInputEnablePadding) {
1637 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1638 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true),
1639 ANEURALNETWORKS_NO_ERROR);
1640
1641 // This should fail, because length is less than the size of a float32.
1642 char buffer[20];
1643 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer,
1644 sizeof(float) - 1),
1645 ANEURALNETWORKS_BAD_DATA);
1646 } else {
1647 GTEST_SKIP();
1648 }
1649 }
1650
1651 TEST_F(ValidationTestExecution, SetOutput) {
1652 char buffer[20];
1653 EXPECT_EQ(ANeuralNetworksExecution_setOutput(nullptr, 0, nullptr, buffer, sizeof(float)),
1654 ANEURALNETWORKS_UNEXPECTED_NULL);
1655 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, nullptr, sizeof(float)),
1656 ANEURALNETWORKS_UNEXPECTED_NULL);
1657
1658 // This should fail, because the length does not match the size of the operand.
1659 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, buffer, 20),
1660 ANEURALNETWORKS_BAD_DATA);
1661
1662 // This should fail, as this operand does not exist.
1663 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 999, nullptr, buffer, sizeof(float)),
1664 ANEURALNETWORKS_BAD_DATA);
1665
1666 // This should fail, as this operand does not exist.
1667 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, -1, nullptr, buffer, sizeof(float)),
1668 ANEURALNETWORKS_BAD_DATA);
1669
1670 // These should fail, because the tensor types are invalid.
1671 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, &kInvalidTensorType1, buffer,
1672 sizeof(float)),
1673 ANEURALNETWORKS_BAD_DATA);
1674 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, &kInvalidTensorType2, buffer,
1675 sizeof(float)),
1676 ANEURALNETWORKS_BAD_DATA);
1677
1678 // Cannot do this twice.
1679 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, buffer, 8),
1680 ANEURALNETWORKS_NO_ERROR);
1681 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, buffer, 8),
1682 ANEURALNETWORKS_BAD_STATE);
1683 }
1684
1685 TEST_F(ValidationTestExecution, SetOutputEnablePadding) {
1686 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1687 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true),
1688 ANEURALNETWORKS_NO_ERROR);
1689
1690 // This should fail, because length is less than the size of a float32.
1691 char buffer[20];
1692 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, buffer,
1693 sizeof(float) - 1),
1694 ANEURALNETWORKS_BAD_DATA);
1695 } else {
1696 GTEST_SKIP();
1697 }
1698 }
1699
1700 TEST_F(ValidationTestExecution, SetInputFromMemory) {
1701 const size_t memorySize = 20;
1702 #ifdef __ANDROID__
1703 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
1704 #else // __ANDROID__
1705 TemporaryFile tmpFile;
1706 int memoryFd = tmpFile.release();
1707 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
1708 #endif // __ANDROID__
1709 ASSERT_GT(memoryFd, 0);
1710
1711 ANeuralNetworksMemory* memory;
1712 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
1713 &memory),
1714 ANEURALNETWORKS_NO_ERROR);
1715
1716 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(nullptr, 0, nullptr, memory, 0,
1717 sizeof(float)),
1718 ANEURALNETWORKS_UNEXPECTED_NULL);
1719 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, nullptr, 0,
1720 sizeof(float)),
1721 ANEURALNETWORKS_UNEXPECTED_NULL);
1722
1723 // This should fail, because the operand does not exist.
1724 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 999, nullptr, memory, 0,
1725 sizeof(float)),
1726 ANEURALNETWORKS_BAD_DATA);
1727
1728 // This should fail, because the operand does not exist.
1729 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, -1, nullptr, memory, 0,
1730 sizeof(float)),
1731 ANEURALNETWORKS_BAD_DATA);
1732
1733 // This should fail, because the length does not match the size of the operand.
1734 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0,
1735 memorySize),
1736 ANEURALNETWORKS_BAD_DATA);
1737
1738 // This should fail, because offset is larger than memorySize.
1739 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory,
1740 memorySize + 1, sizeof(float)),
1741 ANEURALNETWORKS_BAD_DATA);
1742
1743 // This should fail, because requested size is larger than the memory.
1744 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory,
1745 memorySize - 3, sizeof(float)),
1746 ANEURALNETWORKS_BAD_DATA);
1747
1748 // These should fail, because the tensor types are invalid.
1749 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, &kInvalidTensorType1,
1750 memory, 0, sizeof(float)),
1751 ANEURALNETWORKS_BAD_DATA);
1752 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, &kInvalidTensorType2,
1753 memory, 0, sizeof(float)),
1754 ANEURALNETWORKS_BAD_DATA);
1755
1756 // Cannot do this twice.
1757 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, 8),
1758 ANEURALNETWORKS_NO_ERROR);
1759 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, 8),
1760 ANEURALNETWORKS_BAD_STATE);
1761 char buffer[memorySize];
1762 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, buffer, 8),
1763 ANEURALNETWORKS_BAD_STATE);
1764
1765 // close memory
1766 ANeuralNetworksMemory_free(memory);
1767 close(memoryFd);
1768 }
1769
1770 TEST_F(ValidationTestExecution, SetInputFromMemoryEnablePadding) {
1771 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1772 const size_t memorySize = 20;
1773 #ifdef __ANDROID__
1774 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
1775 #else // __ANDROID__
1776 TemporaryFile tmpFile;
1777 int memoryFd = tmpFile.release();
1778 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
1779 #endif // __ANDROID__
1780 ASSERT_GT(memoryFd, 0);
1781
1782 ANeuralNetworksMemory* memory;
1783 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd,
1784 0, &memory),
1785 ANEURALNETWORKS_NO_ERROR);
1786
1787 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true),
1788 ANEURALNETWORKS_NO_ERROR);
1789
1790 // This should fail, because length is less than the size of a float32.
1791 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0,
1792 sizeof(float) - 1),
1793 ANEURALNETWORKS_BAD_DATA);
1794
1795 // close memory
1796 ANeuralNetworksMemory_free(memory);
1797 close(memoryFd);
1798 } else {
1799 GTEST_SKIP();
1800 }
1801 }
1802
1803 #ifdef __ANDROID__
1804 TEST_F(ValidationTestExecution, SetInputFromAHardwareBufferBlob) {
1805 const size_t memorySize = 20;
1806
1807 AHardwareBuffer_Desc desc{
1808 .width = memorySize,
1809 .height = 1,
1810 .layers = 1,
1811 .format = AHARDWAREBUFFER_FORMAT_BLOB,
1812 .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
1813 };
1814
1815 AHardwareBuffer* buffer = nullptr;
1816 ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);
1817
1818 ANeuralNetworksMemory* memory;
1819 EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory),
1820 ANEURALNETWORKS_NO_ERROR);
1821
1822 // This should fail, because the length does not match the size of the operand.
1823 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0,
1824 memorySize),
1825 ANEURALNETWORKS_BAD_DATA);
1826
1827 // This should fail, because offset is larger than memorySize.
1828 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory,
1829 memorySize + 1, sizeof(float)),
1830 ANEURALNETWORKS_BAD_DATA);
1831 // This should fail, because requested size is larger than the memory.
1832 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory,
1833 memorySize - 3, sizeof(float)),
1834 ANEURALNETWORKS_BAD_DATA);
1835
1836 // These should fail, because the tensor types are invalid.
1837 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, &kInvalidTensorType1,
1838 memory, 0, sizeof(float)),
1839 ANEURALNETWORKS_BAD_DATA);
1840 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, &kInvalidTensorType2,
1841 memory, 0, sizeof(float)),
1842 ANEURALNETWORKS_BAD_DATA);
1843
1844 // close memory
1845 ANeuralNetworksMemory_free(memory);
1846 AHardwareBuffer_release(buffer);
1847 }
1848
1849 TEST_F(ValidationTestExecution, SetInputFromAHardwareBufferBlobEnablePadding) {
1850 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1851 const size_t memorySize = 20;
1852
1853 AHardwareBuffer_Desc desc{
1854 .width = memorySize,
1855 .height = 1,
1856 .layers = 1,
1857 .format = AHARDWAREBUFFER_FORMAT_BLOB,
1858 .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
1859 AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
1860 };
1861
1862 AHardwareBuffer* buffer = nullptr;
1863 ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);
1864
1865 ANeuralNetworksMemory* memory;
1866 EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory),
1867 ANEURALNETWORKS_NO_ERROR);
1868
1869 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true),
1870 ANEURALNETWORKS_NO_ERROR);
1871
1872 // This should fail, because length is less than the size of a float32.
1873 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0,
1874 sizeof(float) - 1),
1875 ANEURALNETWORKS_BAD_DATA);
1876
1877 // close memory
1878 ANeuralNetworksMemory_free(memory);
1879 AHardwareBuffer_release(buffer);
1880 } else {
1881 GTEST_SKIP();
1882 }
1883 }
1884 #endif // __ANDROID__
1885
1886 TEST_F(ValidationTestExecution, SetOutputFromMemory) {
1887 ANeuralNetworksExecution* execution;
1888 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
1889
1890 const size_t memorySize = 20;
1891 #ifdef __ANDROID__
1892 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
1893 #else // __ANDROID__
1894 TemporaryFile tmpFile;
1895 int memoryFd = tmpFile.release();
1896 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
1897 #endif // __ANDROID__
1898 ASSERT_GT(memoryFd, 0);
1899
1900 ANeuralNetworksMemory* memory;
1901 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
1902 &memory),
1903 ANEURALNETWORKS_NO_ERROR);
1904
1905 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(nullptr, 0, nullptr, memory, 0,
1906 sizeof(float)),
1907 ANEURALNETWORKS_UNEXPECTED_NULL);
1908 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, nullptr, 0,
1909 sizeof(float)),
1910 ANEURALNETWORKS_UNEXPECTED_NULL);
1911
1912 // This should fail, because the operand does not exist.
1913 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 999, nullptr, memory, 0,
1914 sizeof(float)),
1915 ANEURALNETWORKS_BAD_DATA);
1916
1917 // This should fail, because the operand does not exist.
1918 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, -1, nullptr, memory, 0,
1919 sizeof(float)),
1920 ANEURALNETWORKS_BAD_DATA);
1921
1922 // This should fail, because the length does not match the size of the operand.
1923 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0,
1924 memorySize),
1925 ANEURALNETWORKS_BAD_DATA);
1926
1927 // This should fail, because offset is larger than memorySize.
1928 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory,
1929 memorySize + 1, sizeof(float)),
1930 ANEURALNETWORKS_BAD_DATA);
1931
1932 // This should fail, because requested size is larger than the memory.
1933 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory,
1934 memorySize - 3, sizeof(float)),
1935 ANEURALNETWORKS_BAD_DATA);
1936
1937 // These should fail, because the tensor types are invalid.
1938 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, &kInvalidTensorType1,
1939 memory, 0, sizeof(float)),
1940 ANEURALNETWORKS_BAD_DATA);
1941 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, &kInvalidTensorType2,
1942 memory, 0, sizeof(float)),
1943 ANEURALNETWORKS_BAD_DATA);
1944
1945 // Cannot do this twice.
1946 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 8),
1947 ANEURALNETWORKS_NO_ERROR);
1948 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 8),
1949 ANEURALNETWORKS_BAD_STATE);
1950 char buffer[memorySize];
1951 EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, buffer, 8),
1952 ANEURALNETWORKS_BAD_STATE);
1953
1954 // close memory
1955 ANeuralNetworksMemory_free(memory);
1956 ANeuralNetworksExecution_free(execution);
1957 close(memoryFd);
1958 }
1959
1960 TEST_F(ValidationTestExecution, SetOutputFromMemoryEnablePadding) {
1961 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
1962 ANeuralNetworksExecution* execution;
1963 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
1964 ANEURALNETWORKS_NO_ERROR);
1965
1966 const size_t memorySize = 20;
1967 #ifdef __ANDROID__
1968 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
1969 #else // __ANDROID__
1970 TemporaryFile tmpFile;
1971 int memoryFd = tmpFile.release();
1972 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
1973 #endif // __ANDROID__
1974 ASSERT_GT(memoryFd, 0);
1975
1976 ANeuralNetworksMemory* memory;
1977 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd,
1978 0, &memory),
1979 ANEURALNETWORKS_NO_ERROR);
1980
1981 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true),
1982 ANEURALNETWORKS_NO_ERROR);
1983
1984 // This should fail, because length is less than the size of a float32.
1985 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0,
1986 sizeof(float) - 1),
1987 ANEURALNETWORKS_BAD_DATA);
1988
1989 // close memory
1990 ANeuralNetworksMemory_free(memory);
1991 ANeuralNetworksExecution_free(execution);
1992 close(memoryFd);
1993 } else {
1994 GTEST_SKIP();
1995 }
1996 }
1997
1998 #ifdef __ANDROID__
1999 TEST_F(ValidationTestExecution, SetOutputFromAHardwareBufferBlob) {
2000 const size_t memorySize = 20;
2001
2002 AHardwareBuffer_Desc desc{
2003 .width = memorySize,
2004 .height = 1,
2005 .layers = 1,
2006 .format = AHARDWAREBUFFER_FORMAT_BLOB,
2007 .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
2008 };
2009
2010 AHardwareBuffer* buffer = nullptr;
2011 ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);
2012
2013 ANeuralNetworksMemory* memory;
2014 EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory),
2015 ANEURALNETWORKS_NO_ERROR);
2016
2017 // This should fail, because the length does not match the size of the operand.
2018 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0,
2019 memorySize),
2020 ANEURALNETWORKS_BAD_DATA);
2021
2022 // This should fail, because offset is larger than memorySize.
2023 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory,
2024 memorySize + 1, sizeof(float)),
2025 ANEURALNETWORKS_BAD_DATA);
2026
2027 // This should fail, because requested size is larger than the memory.
2028 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory,
2029 memorySize - 3, sizeof(float)),
2030 ANEURALNETWORKS_BAD_DATA);
2031
2032 // These should fail, because the tensor types are invalid.
2033 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, &kInvalidTensorType1,
2034 memory, 0, sizeof(float)),
2035 ANEURALNETWORKS_BAD_DATA);
2036 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, &kInvalidTensorType2,
2037 memory, 0, sizeof(float)),
2038 ANEURALNETWORKS_BAD_DATA);
2039
2040 // close memory
2041 ANeuralNetworksMemory_free(memory);
2042 AHardwareBuffer_release(buffer);
2043 }
2044
2045 TEST_F(ValidationTestExecution, SetOutputFromAHardwareBufferBlobEnablePadding) {
2046 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
2047 const size_t memorySize = 20;
2048
2049 AHardwareBuffer_Desc desc{
2050 .width = memorySize,
2051 .height = 1,
2052 .layers = 1,
2053 .format = AHARDWAREBUFFER_FORMAT_BLOB,
2054 .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
2055 AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
2056 };
2057
2058 AHardwareBuffer* buffer = nullptr;
2059 ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);
2060
2061 ANeuralNetworksMemory* memory;
2062 EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, &memory),
2063 ANEURALNETWORKS_NO_ERROR);
2064
2065 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(mExecution, true),
2066 ANEURALNETWORKS_NO_ERROR);
2067
2068 // This should fail, because length is less than the size of a float32.
2069 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0,
2070 sizeof(float) - 1),
2071 ANEURALNETWORKS_BAD_DATA);
2072
2073 // close memory
2074 ANeuralNetworksMemory_free(memory);
2075 AHardwareBuffer_release(buffer);
2076 } else {
2077 GTEST_SKIP();
2078 }
2079 }
2080 #endif // __ANDROID__
2081
2082 TEST_F(ValidationTestExecution, EnablePaddingAfterSetInputOutput) {
2083 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
2084 ANeuralNetworksExecution* execution;
2085 char buffer[20];
2086 const size_t memorySize = 20;
2087 #ifdef __ANDROID__
2088 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
2089 #else // __ANDROID__
2090 TemporaryFile tmpFile;
2091 int memoryFd = tmpFile.release();
2092 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
2093 #endif // __ANDROID__
2094 ASSERT_GT(memoryFd, 0);
2095
2096 ANeuralNetworksMemory* memory;
2097 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd,
2098 0, &memory),
2099 ANEURALNETWORKS_NO_ERROR);
2100
2101 // Enable padding after setInput.
2102 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
2103 ANEURALNETWORKS_NO_ERROR);
2104 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, buffer, 8),
2105 ANEURALNETWORKS_NO_ERROR);
2106 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true),
2107 ANEURALNETWORKS_BAD_STATE);
2108 ANeuralNetworksExecution_free(execution);
2109
2110 // Enable padding after setInputFromMemory.
2111 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
2112 ANEURALNETWORKS_NO_ERROR);
2113 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, 8),
2114 ANEURALNETWORKS_NO_ERROR);
2115 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true),
2116 ANEURALNETWORKS_BAD_STATE);
2117 ANeuralNetworksExecution_free(execution);
2118
2119 // Enable padding after setOutput.
2120 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
2121 ANEURALNETWORKS_NO_ERROR);
2122 EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, buffer, 8),
2123 ANEURALNETWORKS_NO_ERROR);
2124 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true),
2125 ANEURALNETWORKS_BAD_STATE);
2126 ANeuralNetworksExecution_free(execution);
2127
2128 // Enable padding after setOutputFromMemory.
2129 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
2130 ANEURALNETWORKS_NO_ERROR);
2131 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0, 8),
2132 ANEURALNETWORKS_NO_ERROR);
2133 EXPECT_EQ(ANeuralNetworksExecution_enableInputAndOutputPadding(execution, true),
2134 ANEURALNETWORKS_BAD_STATE);
2135 ANeuralNetworksExecution_free(execution);
2136
2137 // close memory
2138 ANeuralNetworksMemory_free(memory);
2139 close(memoryFd);
2140 } else {
2141 GTEST_SKIP();
2142 }
2143 }
2144
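// The tests below validate executions that use device memory created from an
// ANeuralNetworksMemoryDesc: such memory must be initialized (written as an output) before it is
// consumed as an input, must be referenced with offset 0 and length 0, and may only be used in
// the roles it was configured for.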
2145 TEST_F(ValidationTestExecutionDeviceMemory, SetInputFromMemory) {
2146 ANeuralNetworksMemoryDesc* desc;
2147 ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
2148 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, mCompilation, 0, 1.0f),
2149 ANEURALNETWORKS_NO_ERROR);
2150
2151 // The following output roles are for init/deinit of the device memory.
2152 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mInitCompilation, 0, 1.0f),
2153 ANEURALNETWORKS_NO_ERROR);
2154 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mDeinitCompilation, 0, 1.0f),
2155 ANEURALNETWORKS_NO_ERROR);
2156
2157 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
2158
2159 ANeuralNetworksMemory* memory;
2160 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
2161 ANeuralNetworksMemoryDesc_free(desc);
2162
2163 // Uninitialized memory as input.
2164 executeWithMemoryAsInput(mCompilation, memory, ANEURALNETWORKS_OP_FAILED);
2165
2166 // The memory is deinitialized between setInputFromMemory and compute.
2167 {
2168 // Initialize device memory.
2169 executeWithMemoryAsOutput(mInitCompilation, memory, ANEURALNETWORKS_NO_ERROR);
2170
2171 float data = 0;
2172 ANeuralNetworksExecution* execution = nullptr;
2173 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
2174 ANEURALNETWORKS_NO_ERROR);
2175 ASSERT_EQ(ANeuralNetworksExecution_setInputFromMemory(execution, 0, nullptr, memory, 0, 0),
2176 ANEURALNETWORKS_NO_ERROR);
2177 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &data, sizeof(float)),
2178 ANEURALNETWORKS_NO_ERROR);
2179
2180 // Deinitialize device memory.
2181 executeWithMemoryAsOutput(mDeinitCompilation, memory, ANEURALNETWORKS_OP_FAILED);
2182
2183 // Uninitialized memory as input at compute time.
2184 ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_OP_FAILED);
2185 ANeuralNetworksExecution_free(execution);
2186 }
2187
2188 // Initialize device memory.
2189 executeWithMemoryAsOutput(mInitCompilation, memory, ANEURALNETWORKS_NO_ERROR);
2190
2191 // Bad offset and length.
2192 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 1, 0),
2193 ANEURALNETWORKS_BAD_DATA);
2194 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0,
2195 sizeof(float)),
2196 ANEURALNETWORKS_BAD_DATA);
2197
2198 // Bad usage -- not configured for this role.
2199 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0, 0),
2200 ANEURALNETWORKS_BAD_DATA);
2201
2202 // Deinitialize device memory.
2203 executeWithMemoryAsOutput(mDeinitCompilation, memory, ANEURALNETWORKS_OP_FAILED);
2204
2205 // Uninitialized memory as input.
2206 executeWithMemoryAsInput(mCompilation, memory, ANEURALNETWORKS_OP_FAILED);
2207
2208 ANeuralNetworksMemory_free(memory);
2209 }
2210
2211 TEST_F(ValidationTestExecutionDeviceMemory, SetOutputFromMemory) {
2212 ANeuralNetworksMemoryDesc* desc;
2213 ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
2214 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mCompilation, 0, 1.0f),
2215 ANEURALNETWORKS_NO_ERROR);
2216 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
2217
2218 ANeuralNetworksMemory* memory;
2219 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
2220 ANeuralNetworksMemoryDesc_free(desc);
2221
2222 // Bad offset and length.
2223 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 1, 0),
2224 ANEURALNETWORKS_BAD_DATA);
2225 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecution, 0, nullptr, memory, 0,
2226 sizeof(float)),
2227 ANEURALNETWORKS_BAD_DATA);
2228
2229 // Bad usage -- not configured for this role.
2230 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecution, 0, nullptr, memory, 0, 0),
2231 ANEURALNETWORKS_BAD_DATA);
2232
2233 ANeuralNetworksMemory_free(memory);
2234 }
2235
2236 TEST_F(ValidationTestExecutionDeviceMemory, SetInputFromMemory_DynamicShape) {
2237 uint32_t dimension = 1, badDimension = 2;
2238 ANeuralNetworksOperandType badType = {
2239 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2240 .dimensionCount = 1,
2241 .dimensions = &badDimension,
2242 };
2243
2244 ANeuralNetworksMemoryDesc* desc;
2245 ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
2246 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, mCompilationDynamic, 0, 1.0f),
2247 ANEURALNETWORKS_NO_ERROR);
2248 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(desc, 1, &dimension),
2249 ANEURALNETWORKS_NO_ERROR);
2250
2251 // The following output role is for init of the device memory.
2252 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mInitCompilation, 0, 1.0f),
2253 ANEURALNETWORKS_NO_ERROR);
2254
2255 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
2256
2257 ANeuralNetworksMemory* memory;
2258 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
2259 ANeuralNetworksMemoryDesc_free(desc);
2260
2261 // Initialize device memory.
2262 executeWithMemoryAsOutput(mInitCompilation, memory, ANEURALNETWORKS_NO_ERROR);
2263
2264 // Incompatible dimensions between updated type and memory.
2265 EXPECT_EQ(ANeuralNetworksExecution_setInputFromMemory(mExecutionDynamic, 0, &badType, memory, 0,
2266 0),
2267 ANEURALNETWORKS_BAD_DATA);
2268
2269 ANeuralNetworksMemory_free(memory);
2270 }
2271
2272 TEST_F(ValidationTestExecutionDeviceMemory, SetOutputFromMemory_DynamicShape) {
2273 uint32_t dimension = 1, badDimension = 2;
2274 ANeuralNetworksOperandType badType = {
2275 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2276 .dimensionCount = 1,
2277 .dimensions = &badDimension,
2278 };
2279
2280 ANeuralNetworksMemoryDesc* desc;
2281 ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
2282 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mCompilationDynamic, 0, 1.0f),
2283 ANEURALNETWORKS_NO_ERROR);
2284 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(desc, 1, &dimension),
2285 ANEURALNETWORKS_NO_ERROR);
2286 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
2287
2288 ANeuralNetworksMemory* memory;
2289 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &memory), ANEURALNETWORKS_NO_ERROR);
2290 ANeuralNetworksMemoryDesc_free(desc);
2291
2292 // Incompatible dimensions between updated type and memory.
2293 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(mExecutionDynamic, 0, &badType, memory,
2294 0, 0),
2295 ANEURALNETWORKS_BAD_DATA);
2296
2297 ANeuralNetworksMemory_free(memory);
2298 }
2299
2300 TEST_F(ValidationTestExecution, Compute) {
2301 EXPECT_EQ(ANeuralNetworksExecution_compute(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2302 }
2303
2304 TEST_F(ValidationTestExecution, StartCompute) {
2305 ANeuralNetworksExecution* execution;
2306 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
2307
2308 ANeuralNetworksEvent* event;
2309 EXPECT_EQ(ANeuralNetworksExecution_startCompute(nullptr, &event),
2310 ANEURALNETWORKS_UNEXPECTED_NULL);
2311 EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, nullptr),
2312 ANEURALNETWORKS_UNEXPECTED_NULL);
2313
2314 // Free the execution.
2315 ANeuralNetworksExecution_free(execution);
2316 }
2317
2318 TEST_F(ValidationTestExecution, EventWait) {
2319 EXPECT_EQ(ANeuralNetworksEvent_wait(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2320 }
2321
2322 TEST_F(ValidationTest, EventCreateFromSyncFenceFd) {
2323 ANeuralNetworksEvent* event;
2324 EXPECT_EQ(ANeuralNetworksEvent_createFromSyncFenceFd(-1, &event), ANEURALNETWORKS_BAD_DATA);
2325 EXPECT_EQ(ANeuralNetworksEvent_createFromSyncFenceFd(1, nullptr),
2326 ANEURALNETWORKS_UNEXPECTED_NULL);
2327 }
2328
2329 TEST_F(ValidationTest, EventGetSyncFenceFd) {
2330 int syncFd = -100;
2331 EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(nullptr, &syncFd),
2332 ANEURALNETWORKS_UNEXPECTED_NULL);
2333 EXPECT_EQ(syncFd, -1);
2334 }
2335
2336 TEST_F(ValidationTestExecution, EventGetSyncFenceFdFromStartCompute) {
2337 // Create a valid execution and event first.
2338 ANeuralNetworksExecution* execution;
2339 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
2340 float input0[] = {1.0f, 1.0f}, input1[] = {2.0f, 2.0f}, output0[2];
2341 int32_t input2[] = {0};
2342 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, input0, sizeof(input0)),
2343 ANEURALNETWORKS_NO_ERROR);
2344 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, input1, sizeof(input1)),
2345 ANEURALNETWORKS_NO_ERROR);
2346 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, input2, sizeof(input2)),
2347 ANEURALNETWORKS_NO_ERROR);
2348 EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, output0, sizeof(output0)),
2349 ANEURALNETWORKS_NO_ERROR);
2350 ANeuralNetworksEvent* event = nullptr;
2351 EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), ANEURALNETWORKS_NO_ERROR);
2352
2353 // The event from startCompute is not backed by a sync fence.
2354 int syncFd = -100;
2355 EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(event, &syncFd), ANEURALNETWORKS_BAD_DATA);
2356 EXPECT_EQ(syncFd, -1);
2357
2358 ANeuralNetworksEvent_free(event);
2359 ANeuralNetworksExecution_free(execution);
2360 }
2361
2362 TEST_F(ValidationTestExecution, FencedExecution) {
2363 // Create a valid execution and event first.
2364 ANeuralNetworksExecution* execution1;
2365 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution1), ANEURALNETWORKS_NO_ERROR);
2366 float input0[] = {1.0f, 1.0f}, input1[] = {2.0f, 2.0f}, output0[2];
2367 int32_t input2[] = {0};
2368 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution1, 0, nullptr, input0, sizeof(input0)),
2369 ANEURALNETWORKS_NO_ERROR);
2370 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution1, 1, nullptr, input1, sizeof(input1)),
2371 ANEURALNETWORKS_NO_ERROR);
2372 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution1, 2, nullptr, input2, sizeof(input2)),
2373 ANEURALNETWORKS_NO_ERROR);
2374 EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution1, 0, nullptr, output0, sizeof(output0)),
2375 ANEURALNETWORKS_NO_ERROR);
2376 ANeuralNetworksEvent* event1 = nullptr;
2377 EXPECT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution1, nullptr, 0, 0,
2378 &event1),
2379 ANEURALNETWORKS_NO_ERROR);
2380
2381 EXPECT_EQ(ANeuralNetworksEvent_getSyncFenceFd(event1, nullptr),
2382 ANEURALNETWORKS_UNEXPECTED_NULL);
2383
2384 // The event from startComputeWithDependencies may or may not be backed by a sync fence depending
2385 // on the driver implementation.
2386 int syncFd = -100;
2387 int getSyncFdResult = ANeuralNetworksEvent_getSyncFenceFd(event1, &syncFd);
2388 if (getSyncFdResult == ANEURALNETWORKS_NO_ERROR) {
2389 EXPECT_GE(syncFd, 0);
2390 close(syncFd);
2391 } else {
2392 EXPECT_EQ(getSyncFdResult, ANEURALNETWORKS_BAD_DATA);
2393 EXPECT_EQ(syncFd, -1);
2394 }
2395
2396 // The subsequent execution will wait for the first execution to finish.
2397 ANeuralNetworksExecution* execution2;
2398 ANeuralNetworksEvent* event2 = nullptr;
2399 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution2), ANEURALNETWORKS_NO_ERROR);
2400 EXPECT_EQ(
2401 ANeuralNetworksExecution_startComputeWithDependencies(nullptr, &event1, 1, 0, &event2),
2402 ANEURALNETWORKS_UNEXPECTED_NULL);
2403 EXPECT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution2, nullptr, 1, 0,
2404 &event2),
2405 ANEURALNETWORKS_UNEXPECTED_NULL);
2406 EXPECT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution2, &event1, 1, 0,
2407 nullptr),
2408 ANEURALNETWORKS_UNEXPECTED_NULL);
2409 ANeuralNetworksEvent* wait_for_list[] = {event1, nullptr};
2410 EXPECT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution2, wait_for_list, 2, 0,
2411 &event2),
2412 ANEURALNETWORKS_UNEXPECTED_NULL);
2413
2414 ANeuralNetworksEvent_free(event1);
2415 ANeuralNetworksExecution_free(execution1);
2416 ANeuralNetworksExecution_free(execution2);
2417 }
2418
2419 TEST_F(ValidationTestExecution, GetOutputOperandRankAndDimensions) {
2420 ANeuralNetworksExecution* execution;
2421 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
2422
2423 float input0[] = {1.0f, 1.0f}, input1[] = {2.0f, 2.0f}, output0[2];
2424 int32_t input2[] = {0};
2425 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, input0, sizeof(input0)),
2426 ANEURALNETWORKS_NO_ERROR);
2427 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, input1, sizeof(input1)),
2428 ANEURALNETWORKS_NO_ERROR);
2429 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, input2, sizeof(input2)),
2430 ANEURALNETWORKS_NO_ERROR);
2431 EXPECT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, output0, sizeof(output0)),
2432 ANEURALNETWORKS_NO_ERROR);
2433
2434 uint32_t rank, dims[4], expectedRank = 1, expectedDims = 2;
2435 // This should fail, because the execution has not yet started to compute.
2436 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, 0, &rank),
2437 ANEURALNETWORKS_BAD_STATE);
2438 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, dims),
2439 ANEURALNETWORKS_BAD_STATE);
2440
2441 ANeuralNetworksEvent* event;
2442 EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, &event), ANEURALNETWORKS_NO_ERROR);
2443 EXPECT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR);
2444
2445 // These should fail, because of unexpected nullptrs.
2446 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(nullptr, 0, &rank),
2447 ANEURALNETWORKS_UNEXPECTED_NULL);
2448 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(nullptr, 0, dims),
2449 ANEURALNETWORKS_UNEXPECTED_NULL);
2450 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, 0, nullptr),
2451 ANEURALNETWORKS_UNEXPECTED_NULL);
2452 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, nullptr),
2453 ANEURALNETWORKS_UNEXPECTED_NULL);
2454
2455 // These should fail, because the operand does not exist.
2456 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, -1, &rank),
2457 ANEURALNETWORKS_BAD_DATA);
2458 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, 999, &rank),
2459 ANEURALNETWORKS_BAD_DATA);
2460 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, -1, dims),
2461 ANEURALNETWORKS_BAD_DATA);
2462 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, 999, dims),
2463 ANEURALNETWORKS_BAD_DATA);
2464
2465 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandRank(execution, 0, &rank),
2466 ANEURALNETWORKS_NO_ERROR);
2467 EXPECT_EQ(ANeuralNetworksExecution_getOutputOperandDimensions(execution, 0, dims),
2468 ANEURALNETWORKS_NO_ERROR);
2469 EXPECT_EQ(rank, expectedRank);
2470 EXPECT_EQ(dims[0], expectedDims);
2471
2472 // Free the event and execution.
2473 ANeuralNetworksEvent_free(event);
2474 ANeuralNetworksExecution_free(execution);
2475 }
2476
2477 // Regression test for b/146044137.
2478 class ValidationTestDimensionProductOverflow : public ValidationTestExecution {
2479 protected:
2480 void createModel() override {
2481 uint32_t dimensions[] = {5, 4, 4, 0, 5, 3, 0, 4, 5};
2482 ANeuralNetworksOperandType operandType = {
2483 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2484 .dimensionCount = std::size(dimensions),
2485 .dimensions = dimensions,
2486 };
2487 addOperand(operandType);
2488 addOperand(operandType);
2489 ASSERT_EQ(addOperation(ANEURALNETWORKS_ABS, {0}, {1}), ANEURALNETWORKS_NO_ERROR);
2490 ASSERT_EQ(identifyInputsAndOutputs({0}, {1}), ANEURALNETWORKS_NO_ERROR);
2491 ASSERT_EQ(modelFinish(), ANEURALNETWORKS_NO_ERROR);
2492 }
2493 };
2494
2495 TEST_F(ValidationTestDimensionProductOverflow, SetInputOrOutput) {
2496 uint32_t dimensions[] = {5, 4, 4, 786433, 5, 3, 16777216, 4, 5};
2497 ANeuralNetworksOperandType operandType = {
2498 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2499 .dimensionCount = std::size(dimensions),
2500 .dimensions = dimensions,
2501 };
2502 uint8_t buffer[20];
2503 // This should fail, as the new operand type's dimension product overflows
2504 // uint32_t.
2505 EXPECT_EQ(
2506 ANeuralNetworksExecution_setInput(mExecution, 0, &operandType, buffer, sizeof(buffer)),
2507 ANEURALNETWORKS_BAD_DATA);
2508 EXPECT_EQ(
2509 ANeuralNetworksExecution_setOutput(mExecution, 0, &operandType, buffer, sizeof(buffer)),
2510 ANEURALNETWORKS_BAD_DATA);
2511 }
2512
2513 TEST_F(ValidationTestModel, AddOperandDimensionProductOverflow) {
2514 uint32_t dimensions[] = {5, 4, 4, 786433, 5, 3, 16777216, 4, 5};
2515 ANeuralNetworksOperandType operandType = {
2516 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2517 .dimensionCount = std::size(dimensions),
2518 .dimensions = dimensions,
2519 };
2520 // This should fail, as the operand type's dimension product overflows uint32_t.
2521 ASSERT_EQ(ANeuralNetworksModel_addOperand(mModel, &operandType), ANEURALNETWORKS_BAD_DATA);
2522 }
2523
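// Builds a FULLY_CONNECTED model whose operands have unknown (zero) dimensions, so the output
// size is only deduced at execution time.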
2524 class ValidationTestDimensionProductOverflow2 : public ValidationTestExecution {
2525 protected:
2526 void createModel() override {
2527 addTensorOperand(ANEURALNETWORKS_TENSOR_FLOAT32, {0, 1});
2528 addTensorOperand(ANEURALNETWORKS_TENSOR_FLOAT32, {0, 1});
2529 addTensorOperand(ANEURALNETWORKS_TENSOR_FLOAT32, {0});
2530 addScalarOperand(ANEURALNETWORKS_INT32);
2531 int32_t activation = 0;
2532 ASSERT_EQ(ANeuralNetworksModel_setOperandValue(mModel, 3, &activation, sizeof(activation)),
2533 ANEURALNETWORKS_NO_ERROR);
2534 addTensorOperand(ANEURALNETWORKS_TENSOR_FLOAT32, {0, 0});
2535 ASSERT_EQ(addOperation(ANEURALNETWORKS_FULLY_CONNECTED, {0, 1, 2, 3}, {4}),
2536 ANEURALNETWORKS_NO_ERROR);
2537 ASSERT_EQ(identifyInputsAndOutputs({0, 1, 2}, {4}), ANEURALNETWORKS_NO_ERROR);
2538 ASSERT_EQ(modelFinish(), ANEURALNETWORKS_NO_ERROR);
2539 }
2540 };
2541
2542 TEST_F(ValidationTestDimensionProductOverflow2, DynamicOutputShapeOverflow) {
2543 constexpr uint32_t kLargeDim = 1 << 16;
2544 std::vector<float> inputData(kLargeDim), outputData(kLargeDim);
2545 const uint32_t inputDims[] = {kLargeDim, 1};
2546 const uint32_t biasDims[] = {kLargeDim};
2547 const ANeuralNetworksOperandType inputType = {
2548 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2549 .dimensionCount = std::size(inputDims),
2550 .dimensions = inputDims,
2551 };
2552 const ANeuralNetworksOperandType biasType = {
2553 .type = ANEURALNETWORKS_TENSOR_FLOAT32,
2554 .dimensionCount = std::size(biasDims),
2555 .dimensions = biasDims,
2556 };
2557 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, &inputType, inputData.data(),
2558 inputData.size() * sizeof(float)),
2559 ANEURALNETWORKS_NO_ERROR);
2560 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 1, &inputType, inputData.data(),
2561 inputData.size() * sizeof(float)),
2562 ANEURALNETWORKS_NO_ERROR);
2563 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 2, &biasType, inputData.data(),
2564 inputData.size() * sizeof(float)),
2565 ANEURALNETWORKS_NO_ERROR);
2566 EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, outputData.data(),
2567 outputData.size() * sizeof(float)),
2568 ANEURALNETWORKS_NO_ERROR);
2569
2570 // This should fail, because the deduced output data size overflows uint32_t.
2571 EXPECT_NE(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);
2572 }
2573
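// The ValidationTestBurst tests below check burst argument validation and that a burst may only
// be used with executions created from the same finished compilation.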
2574 TEST_F(ValidationTestBurst, BurstComputeNull) {
2575 EXPECT_EQ(ANeuralNetworksExecution_burstCompute(mExecution, nullptr),
2576 ANEURALNETWORKS_UNEXPECTED_NULL);
2577 EXPECT_EQ(ANeuralNetworksExecution_burstCompute(nullptr, mBurst),
2578 ANEURALNETWORKS_UNEXPECTED_NULL);
2579 }
2580
2581 TEST_F(ValidationTestBurst, BurstComputeBadCompilation) {
2582 ANeuralNetworksCompilation* compilation;
2583 ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &compilation), ANEURALNETWORKS_NO_ERROR);
2584 // NOTE: ANeuralNetworksCompilation_finish not called
2585
2586 ANeuralNetworksBurst* burst;
2587 EXPECT_EQ(ANeuralNetworksBurst_create(compilation, &burst), ANEURALNETWORKS_BAD_STATE);
2588
2589 // Free the burst and compilation.
2590 ANeuralNetworksBurst_free(burst);
2591 ANeuralNetworksCompilation_free(compilation);
2592 }
2593
2594 TEST_F(ValidationTestBurst, BurstComputeDifferentCompilations) {
2595 ANeuralNetworksCompilation* secondCompilation;
2596 ASSERT_EQ(ANeuralNetworksCompilation_create(mModel, &secondCompilation),
2597 ANEURALNETWORKS_NO_ERROR);
2598 ASSERT_EQ(ANeuralNetworksCompilation_finish(secondCompilation), ANEURALNETWORKS_NO_ERROR);
2599
2600 ANeuralNetworksExecution* execution;
2601 EXPECT_EQ(ANeuralNetworksExecution_create(secondCompilation, &execution),
2602 ANEURALNETWORKS_NO_ERROR);
2603
2604 EXPECT_EQ(ANeuralNetworksExecution_burstCompute(execution, mBurst), ANEURALNETWORKS_BAD_DATA);
2605
2606 ANeuralNetworksExecution_free(execution);
2607 ANeuralNetworksCompilation_free(secondCompilation);
2608 }
2609
TEST_F(ValidationTestBurst,BurstComputeConcurrent)2610 TEST_F(ValidationTestBurst, BurstComputeConcurrent) {
2611 ANeuralNetworksExecution* secondExecution;
2612 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &secondExecution),
2613 ANEURALNETWORKS_NO_ERROR);
2614
2615 // set inputs of first execution
2616 float inputA0[] = {1.0f, 1.0f}, inputA1[] = {2.0f, 2.0f}, outputA0[2];
2617 int32_t inputA2[] = {0};
2618 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, inputA0, sizeof(inputA0)),
2619 ANEURALNETWORKS_NO_ERROR);
2620 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 1, nullptr, inputA1, sizeof(inputA1)),
2621 ANEURALNETWORKS_NO_ERROR);
2622 EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 2, nullptr, inputA2, sizeof(inputA2)),
2623 ANEURALNETWORKS_NO_ERROR);
2624 EXPECT_EQ(
2625 ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, outputA0, sizeof(outputA0)),
2626 ANEURALNETWORKS_NO_ERROR);
2627
2628 // set inputs of second execution
2629 float inputB0[] = {1.0f, 1.0f}, inputB1[] = {2.0f, 2.0f}, outputB0[2];
2630 int32_t inputB2[] = {0};
2631 EXPECT_EQ(ANeuralNetworksExecution_setInput(secondExecution, 0, nullptr, inputB0,
2632 sizeof(inputB0)),
2633 ANEURALNETWORKS_NO_ERROR);
2634 EXPECT_EQ(ANeuralNetworksExecution_setInput(secondExecution, 1, nullptr, inputB1,
2635 sizeof(inputB1)),
2636 ANEURALNETWORKS_NO_ERROR);
2637 EXPECT_EQ(ANeuralNetworksExecution_setInput(secondExecution, 2, nullptr, inputB2,
2638 sizeof(inputB2)),
2639 ANEURALNETWORKS_NO_ERROR);
2640 EXPECT_EQ(ANeuralNetworksExecution_setOutput(secondExecution, 0, nullptr, outputB0,
2641 sizeof(outputB0)),
2642 ANEURALNETWORKS_NO_ERROR);
2643
2644 // Execute on the same burst concurrently. At least one result must be
2645 // ANEURALNETWORKS_NO_ERROR. One may return ANEURALNETWORKS_BAD_STATE if the
2646 // other is already executing on the burst.
2647 auto first = std::async(std::launch::async, [this] {
2648 return ANeuralNetworksExecution_burstCompute(mExecution, mBurst);
2649 });
2650 auto second = std::async(std::launch::async, [this, secondExecution] {
2651 return ANeuralNetworksExecution_burstCompute(secondExecution, mBurst);
2652 });
2653
2654 const int result1 = first.get();
2655 const int result2 = second.get();
2656 EXPECT_TRUE(result1 == ANEURALNETWORKS_BAD_STATE || result1 == ANEURALNETWORKS_NO_ERROR);
2657 EXPECT_TRUE(result2 == ANEURALNETWORKS_BAD_STATE || result2 == ANEURALNETWORKS_NO_ERROR);
2658 EXPECT_TRUE(result1 == ANEURALNETWORKS_NO_ERROR || result2 == ANEURALNETWORKS_NO_ERROR);
2659
2660 ANeuralNetworksExecution_free(secondExecution);
2661 }
2662
2663 // The burst object maintains a local cache of memory objects. Because the burst
2664 // is intended to live for multiple executions, and because memory might be
2665 // created and freed for each execution, burst includes internal mechanisms to
2666 // purge memory objects from its cache that have been freed by the NNAPI client.
2667 // The following two test cases (FreeMemoryBeforeBurst and
2668 // FreeBurstBeforeMemory) ensure that this internal cleanup is tested in both
2669 // freeing orders.
2670 //
2671 // These two test cases explicitly create a new burst object and a new execution
2672 // object so that the order of freeing can be specified. If these tests instead
2673 // relied on the provided mExecution and mBurst, mBurst would always be freed
2674 // before mExecution.
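//
// The pattern exercised by both tests, in outline (illustrative only):
//   1. create a burst from the finished compilation,
//   2. run one burstCompute with an output backed by an ANeuralNetworksMemory,
//      which causes the burst to cache that memory object,
//   3. free the memory and the burst in the order under test; the burst's
//      internal cache must drop the freed memory without any stale access.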
2675
TEST_F(ValidationTestBurst,FreeMemoryBeforeBurst)2676 TEST_F(ValidationTestBurst, FreeMemoryBeforeBurst) {
2677 ANeuralNetworksBurst* burst;
2678 EXPECT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst), ANEURALNETWORKS_NO_ERROR);
2679
2680 // prepare data for execution
2681 float input0[] = {1.0f, 1.0f}, input1[] = {2.0f, 2.0f}, output0[2];
2682 int32_t input2[] = {0};
2683
2684 const size_t memorySize = sizeof(output0);
2685 #ifdef __ANDROID__
2686 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
2687 #else // __ANDROID__
2688 TemporaryFile tmpFile;
2689 int memoryFd = tmpFile.release();
2690 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
2691 #endif // __ANDROID__
2692 ASSERT_GT(memoryFd, 0);
2693
2694 ANeuralNetworksMemory* memory;
2695 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
2696 &memory),
2697 ANEURALNETWORKS_NO_ERROR);
2698
2699 // create and configure execution
2700 ANeuralNetworksExecution* execution;
2701 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
2702 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, input0, sizeof(input0)),
2703 ANEURALNETWORKS_NO_ERROR);
2704 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, input1, sizeof(input1)),
2705 ANEURALNETWORKS_NO_ERROR);
2706 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, input2, sizeof(input2)),
2707 ANEURALNETWORKS_NO_ERROR);
2708 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0,
2709 sizeof(output0)),
2710 ANEURALNETWORKS_NO_ERROR);
2711
2712 // perform an execution so that the burst caches the memory object
2713 EXPECT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst), ANEURALNETWORKS_NO_ERROR);
2714 ANeuralNetworksExecution_free(execution);
2715
2716 // free memory before burst
2717 ANeuralNetworksMemory_free(memory);
2718 ANeuralNetworksBurst_free(burst);
2719
2720 // close memory
2721 close(memoryFd);
2722 }
2723
TEST_F(ValidationTestBurst,FreeBurstBeforeMemory)2724 TEST_F(ValidationTestBurst, FreeBurstBeforeMemory) {
2725 ANeuralNetworksBurst* burst;
2726 EXPECT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst), ANEURALNETWORKS_NO_ERROR);
2727
2728 // prepare data for execution
2729 float input0[] = {1.0f, 1.0f}, input1[] = {2.0f, 2.0f}, output0[2];
2730 int32_t input2[] = {0};
2731 const size_t memorySize = sizeof(output0);
2732 #ifdef __ANDROID__
2733 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
2734 #else // __ANDROID__
2735 TemporaryFile tmpFile;
2736 int memoryFd = tmpFile.release();
2737 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
2738 #endif // __ANDROID__
2739 ASSERT_GT(memoryFd, 0);
2740
2741 ANeuralNetworksMemory* memory;
2742 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
2743 &memory),
2744 ANEURALNETWORKS_NO_ERROR);
2745
2746 // create and configure execution
2747 ANeuralNetworksExecution* execution;
2748 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
2749 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, input0, sizeof(input0)),
2750 ANEURALNETWORKS_NO_ERROR);
2751 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, input1, sizeof(input1)),
2752 ANEURALNETWORKS_NO_ERROR);
2753 EXPECT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, input2, sizeof(input2)),
2754 ANEURALNETWORKS_NO_ERROR);
2755 EXPECT_EQ(ANeuralNetworksExecution_setOutputFromMemory(execution, 0, nullptr, memory, 0,
2756 sizeof(output0)),
2757 ANEURALNETWORKS_NO_ERROR);
2758
2759 // perform an execution so that the burst caches the memory object
2760 EXPECT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst), ANEURALNETWORKS_NO_ERROR);
2761 ANeuralNetworksExecution_free(execution);
2762
2763 // free burst before memory
2764 ANeuralNetworksBurst_free(burst);
2765 ANeuralNetworksMemory_free(memory);
2766
2767 // close memory
2768 close(memoryFd);
2769 }
2770
TEST(ValidationTestIntrospection,GetNumDevices)2771 TEST(ValidationTestIntrospection, GetNumDevices) {
2772 uint32_t numDevices = 0;
2773 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2774 EXPECT_EQ(ANeuralNetworks_getDeviceCount(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2775 }
2776
TEST(ValidationTestIntrospection,GetDevice)2777 TEST(ValidationTestIntrospection, GetDevice) {
2778 uint32_t numDevices = 0;
2779 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2780
2781 ANeuralNetworksDevice* device = nullptr;
2782 for (uint32_t i = 0; i < numDevices; i++) {
2783 SCOPED_TRACE(i);
2784 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
2785 EXPECT_NE(device, nullptr);
2786 }
2787 EXPECT_EQ(ANeuralNetworks_getDevice(0, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2788 EXPECT_EQ(ANeuralNetworks_getDevice(numDevices, &device), ANEURALNETWORKS_BAD_DATA);
2789 }
2790
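// Exercises one of the ANeuralNetworksDevice string getters (e.g. getName or
// getVersion) against every available device as well as against null device and
// null buffer arguments.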
deviceStringCheck(std::function<int (const ANeuralNetworksDevice *,const char **)> func)2791 static void deviceStringCheck(std::function<int(const ANeuralNetworksDevice*, const char**)> func) {
2792 uint32_t numDevices = 0;
2793 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2794
2795 const char* buffer;
2796 for (uint32_t i = 0; i < numDevices; i++) {
2797 SCOPED_TRACE(i);
2798 ANeuralNetworksDevice* device;
2799 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
2800 EXPECT_EQ(func(device, &buffer), ANEURALNETWORKS_NO_ERROR);
2801 EXPECT_EQ(func(device, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2802 }
2803 EXPECT_EQ(func(nullptr, &buffer), ANEURALNETWORKS_UNEXPECTED_NULL);
2804 EXPECT_EQ(func(nullptr, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2805 }
2806
TEST(ValidationTestIntrospection,DeviceGetName)2807 TEST(ValidationTestIntrospection, DeviceGetName) {
2808 deviceStringCheck(ANeuralNetworksDevice_getName);
2809 }
2810
TEST(ValidationTestIntrospection,DeviceGetNameUnique)2811 TEST(ValidationTestIntrospection, DeviceGetNameUnique) {
2812 uint32_t numDevices = 0;
2813 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2814
2815 std::set<std::string> deviceNames;
2816 for (uint32_t i = 0; i < numDevices; i++) {
2817 ANeuralNetworksDevice* device = nullptr;
2818 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
2819 const char* buffer = nullptr;
2820 EXPECT_EQ(ANeuralNetworksDevice_getName(device, &buffer), ANEURALNETWORKS_NO_ERROR);
2821 std::string name(buffer);
2822 EXPECT_EQ(deviceNames.count(name), (uint32_t)0);
2823 deviceNames.insert(name);
2824 }
2825 }
2826
TEST(ValidationTestIntrospection,DeviceGetVersion)2827 TEST(ValidationTestIntrospection, DeviceGetVersion) {
2828 deviceStringCheck(ANeuralNetworksDevice_getVersion);
2829 }
2830
TEST(ValidationTestIntrospection,DeviceGetFeatureLevel)2831 TEST(ValidationTestIntrospection, DeviceGetFeatureLevel) {
2832 uint32_t numDevices = 0;
2833 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2834
2835 int64_t featureLevel;
2836 for (uint32_t i = 0; i < numDevices; i++) {
2837 SCOPED_TRACE(i);
2838 ANeuralNetworksDevice* device;
2839 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
2840 EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, &featureLevel),
2841 ANEURALNETWORKS_NO_ERROR);
2842 EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(device, nullptr),
2843 ANEURALNETWORKS_UNEXPECTED_NULL);
2844 }
2845 EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(nullptr, &featureLevel),
2846 ANEURALNETWORKS_UNEXPECTED_NULL);
2847 EXPECT_EQ(ANeuralNetworksDevice_getFeatureLevel(nullptr, nullptr),
2848 ANEURALNETWORKS_UNEXPECTED_NULL);
2849 }
2850
TEST(ValidationTestIntrospection,DeviceGetType)2851 TEST(ValidationTestIntrospection, DeviceGetType) {
2852 uint32_t numDevices = 0;
2853 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2854
2855 int32_t validTypes[] = {ANEURALNETWORKS_DEVICE_UNKNOWN, ANEURALNETWORKS_DEVICE_OTHER,
2856 ANEURALNETWORKS_DEVICE_CPU, ANEURALNETWORKS_DEVICE_GPU,
2857 ANEURALNETWORKS_DEVICE_ACCELERATOR};
2858 int32_t deviceType;
2859 for (uint32_t i = 0; i < numDevices; i++) {
2860 SCOPED_TRACE(i);
2861 // Initialize the deviceType to be an invalid type.
2862 deviceType = -1;
2863 ANeuralNetworksDevice* device;
2864 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
2865 EXPECT_EQ(ANeuralNetworksDevice_getType(device, &deviceType), ANEURALNETWORKS_NO_ERROR);
2866 EXPECT_TRUE(std::find(std::begin(validTypes), std::end(validTypes), deviceType) !=
2867 std::end(validTypes));
2868 EXPECT_EQ(ANeuralNetworksDevice_getType(device, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2869 }
2870 EXPECT_EQ(ANeuralNetworksDevice_getType(nullptr, &deviceType), ANEURALNETWORKS_UNEXPECTED_NULL);
2871 EXPECT_EQ(ANeuralNetworksDevice_getType(nullptr, nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2872 }
2873
TEST(ValidationTestIntrospection,DeviceWait)2874 TEST(ValidationTestIntrospection, DeviceWait) {
2875 uint32_t numDevices = 0;
2876 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2877
2878 for (uint32_t i = 0; i < numDevices; i++) {
2879 SCOPED_TRACE(i);
2880 ANeuralNetworksDevice* device;
2881 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
2882 EXPECT_EQ(ANeuralNetworksDevice_wait(device), ANEURALNETWORKS_NO_ERROR);
2883 }
2884 EXPECT_EQ(ANeuralNetworksDevice_wait(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2885 }
2886
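// Fixture that targets a single device: if device 0 supports the model, a
// compilation is created for that device alone. Tests bail out early when
// mCompilation is null (no device available or the model is unsupported).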
2887 class ValidationTestCompilationForDevices_1 : public ValidationTestModel {
2888 protected:
SetUp()2889 virtual void SetUp() override {
2890 ValidationTestModel::SetUp();
2891 createModel();
2892
2893 uint32_t numDevices = 0;
2894 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
2895
2896 if (numDevices > 0) {
2897 EXPECT_EQ(ANeuralNetworks_getDevice(0, &mDevice), ANEURALNETWORKS_NO_ERROR);
2898 bool supported = false;
2899 ASSERT_EQ(mNumOperations, static_cast<uint32_t>(1));
2900 EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, &mDevice, 1,
2901 &supported),
2902 ANEURALNETWORKS_NO_ERROR);
2903 if (supported) {
2904 ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, &mDevice, 1,
2905 &mCompilation),
2906 ANEURALNETWORKS_NO_ERROR);
2907 }
2908 }
2909 }
2910
TearDown()2911 virtual void TearDown() {
2912 ANeuralNetworksCompilation_free(mCompilation);
2913 ValidationTestModel::TearDown();
2914 }
2915
2916 ANeuralNetworksDevice* mDevice = nullptr;
2917 ANeuralNetworksCompilation* mCompilation = nullptr;
2918 };
2919
2920 // Also see TEST_F(ValidationTestCompilation, SetPreference)
TEST_F(ValidationTestCompilationForDevices_1,SetPreference)2921 TEST_F(ValidationTestCompilationForDevices_1, SetPreference) {
2922 EXPECT_EQ(ANeuralNetworksCompilation_setPreference(nullptr, ANEURALNETWORKS_PREFER_LOW_POWER),
2923 ANEURALNETWORKS_UNEXPECTED_NULL);
2924 if (!mCompilation) {
2925 return;
2926 }
2927 EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation, 40), ANEURALNETWORKS_BAD_DATA);
2928 }
2929
2930 // Also see TEST_F(ValidationTestCompilation, SetCaching)
TEST_F(ValidationTestCompilationForDevices_1,SetCaching)2931 TEST_F(ValidationTestCompilationForDevices_1, SetCaching) {
2932 std::vector<uint8_t> token(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
2933 EXPECT_EQ(ANeuralNetworksCompilation_setCaching(nullptr, NN_TMP_DIR, token.data()),
2934 ANEURALNETWORKS_UNEXPECTED_NULL);
2935 if (!mCompilation) {
2936 return;
2937 }
2938 EXPECT_EQ(ANeuralNetworksCompilation_setCaching(mCompilation, nullptr, token.data()),
2939 ANEURALNETWORKS_UNEXPECTED_NULL);
2940 EXPECT_EQ(ANeuralNetworksCompilation_setCaching(mCompilation, NN_TMP_DIR, nullptr),
2941 ANEURALNETWORKS_UNEXPECTED_NULL);
2942 }
2943
2944 // Also see TEST_F(ValidationTestCompilation, CreateExecution)
TEST_F(ValidationTestCompilationForDevices_1,CreateExecution)2945 TEST_F(ValidationTestCompilationForDevices_1, CreateExecution) {
2946 ANeuralNetworksExecution* execution = nullptr;
2947 EXPECT_EQ(ANeuralNetworksExecution_create(nullptr, &execution),
2948 ANEURALNETWORKS_UNEXPECTED_NULL);
2949 if (!mCompilation) {
2950 return;
2951 }
2952 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, nullptr),
2953 ANEURALNETWORKS_UNEXPECTED_NULL);
2954 EXPECT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_BAD_STATE);
2955 }
2956
2957 // Also see TEST_F(ValidationTestCompilation, Finish)
TEST_F(ValidationTestCompilationForDevices_1,Finish)2958 TEST_F(ValidationTestCompilationForDevices_1, Finish) {
2959 EXPECT_EQ(ANeuralNetworksCompilation_finish(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
2960 if (!mCompilation) {
2961 return;
2962 }
2963 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
2964 EXPECT_EQ(ANeuralNetworksCompilation_setPreference(mCompilation,
2965 ANEURALNETWORKS_PREFER_FAST_SINGLE_ANSWER),
2966 ANEURALNETWORKS_BAD_STATE);
2967 EXPECT_EQ(
2968 ANeuralNetworksCompilation_setPriority(mCompilation, ANEURALNETWORKS_PRIORITY_DEFAULT),
2969 ANEURALNETWORKS_BAD_STATE);
2970 EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(mCompilation, kShortWaitInNanoseconds),
2971 ANEURALNETWORKS_BAD_STATE);
2972 std::vector<uint8_t> token(ANEURALNETWORKS_BYTE_SIZE_OF_CACHE_TOKEN, 0);
2973 EXPECT_EQ(ANeuralNetworksCompilation_setCaching(mCompilation, NN_TMP_DIR, token.data()),
2974 ANEURALNETWORKS_BAD_STATE);
2975 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_STATE);
2976 }
2977
2978 // Also see TEST_F(ValidationTestCompilation, SetTimeout)
2979 // Also see TEST_F(ValidationTestCompilationForDevices_2, SetTimeout)
TEST_F(ValidationTestCompilationForDevices_1,SetTimeout)2980 TEST_F(ValidationTestCompilationForDevices_1, SetTimeout) {
2981 if (!mCompilation) {
2982 return;
2983 }
2984
2985 EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(mCompilation, kShortWaitInNanoseconds),
2986 ANEURALNETWORKS_NO_ERROR);
2987
2988 // Attempt to finish
2989 const int n = ANeuralNetworksCompilation_finish(mCompilation);
2990 EXPECT_TRUE(n == ANEURALNETWORKS_NO_ERROR || n == ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT ||
2991 n == ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT);
2992 }
2993
TEST_F(ValidationTestCompilationForDevices_1,SetTimeoutMaximum)2994 TEST_F(ValidationTestCompilationForDevices_1, SetTimeoutMaximum) {
2995 if (!mCompilation) {
2996 return;
2997 }
2998
2999 constexpr uint64_t duration = std::numeric_limits<uint64_t>::max();
3000 EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(mCompilation, duration),
3001 ANEURALNETWORKS_NO_ERROR);
3002 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3003 }
3004
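// Like ValidationTestCompilationForDevices_1, but creates the compilation for
// two devices (when at least two are present and support the model); used to
// check APIs that are only valid for single-device compilations, such as
// setTimeout and setMeasureTiming.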
3005 class ValidationTestCompilationForDevices_2 : public ValidationTestModel {
3006 protected:
SetUp()3007 virtual void SetUp() override {
3008 ValidationTestModel::SetUp();
3009 createModel();
3010
3011 uint32_t numDevices = 0;
3012 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
3013
3014 if (numDevices > 1) {
3015 EXPECT_EQ(ANeuralNetworks_getDevice(0, &mDevices[0]), ANEURALNETWORKS_NO_ERROR);
3016 EXPECT_EQ(ANeuralNetworks_getDevice(1, &mDevices[1]), ANEURALNETWORKS_NO_ERROR);
3017 bool supported = false;
3018 ASSERT_EQ(mNumOperations, static_cast<uint32_t>(1));
3019 EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, mDevices, 2,
3020 &supported),
3021 ANEURALNETWORKS_NO_ERROR);
3022 if (supported) {
3023 ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, mDevices, 2,
3024 &mCompilation),
3025 ANEURALNETWORKS_NO_ERROR);
3026 }
3027 }
3028 }
3029
TearDown()3030 virtual void TearDown() {
3031 ANeuralNetworksCompilation_free(mCompilation);
3032 ValidationTestModel::TearDown();
3033 }
3034
3035 ANeuralNetworksDevice* mDevices[2] = {nullptr, nullptr};
3036 ANeuralNetworksCompilation* mCompilation = nullptr;
3037 };
3038
3039 // Also see TEST_F(ValidationTestCompilation, SetTimeout)
3040 // Also see TEST_F(ValidationTestCompilationForDevices_1, SetTimeout)
TEST_F(ValidationTestCompilationForDevices_2,SetTimeout)3041 TEST_F(ValidationTestCompilationForDevices_2, SetTimeout) {
3042 EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(nullptr, kShortWaitInNanoseconds),
3043 ANEURALNETWORKS_UNEXPECTED_NULL);
3044 if (!mCompilation) {
3045 return;
3046 }
3047 // Timeouts can only be set on compilations created by
3048 // ANeuralNetworksCompilation_createForDevices with a single device specified.
3049 EXPECT_EQ(ANeuralNetworksCompilation_setTimeout(mCompilation, kShortWaitInNanoseconds),
3050 ANEURALNETWORKS_BAD_DATA);
3051 }
3052
3053 // Also see TEST_F(ValidationTestCompilation, ExecutionSetTimeout)
3054 // Also see TEST_F(ValidationTestCompilationForDevices_1, ExecutionSetTimeout)
TEST_F(ValidationTestCompilationForDevices_2,ExecutionSetTimeout)3055 TEST_F(ValidationTestCompilationForDevices_2, ExecutionSetTimeout) {
3056 EXPECT_EQ(ANeuralNetworksExecution_setTimeout(nullptr, kShortWaitInNanoseconds),
3057 ANEURALNETWORKS_UNEXPECTED_NULL);
3058
3059 if (!mCompilation) {
3060 return;
3061 }
3062 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3063 ANeuralNetworksExecution* execution;
3064 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
3065 // Timeouts can only be set on compilations created by
3066 // ANeuralNetworksCompilation_createForDevices with a single device specified.
3067 EXPECT_EQ(ANeuralNetworksExecution_setTimeout(execution, kShortWaitInNanoseconds),
3068 ANEURALNETWORKS_BAD_DATA);
3069 ANeuralNetworksExecution_free(execution);
3070 }
3071
3072 // Also see TEST_F(ValidationTestCompilation, ExecutionTiming)
3073 // Also see TEST_F(ValidationTestCompilationForDevices_1, ExecutionTiming)
TEST_F(ValidationTestCompilationForDevices_2,ExecutionTiming)3074 TEST_F(ValidationTestCompilationForDevices_2, ExecutionTiming) {
3075 if (!mCompilation) {
3076 return;
3077 }
3078 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3079 ANeuralNetworksExecution* execution;
3080 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
3081 // Cannot setMeasureTiming() if there are two or more devices.
3082 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, false),
3083 ANEURALNETWORKS_BAD_DATA);
3084 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, true), ANEURALNETWORKS_BAD_DATA);
3085
3086 // free the execution
3087 ANeuralNetworksExecution_free(execution);
3088 }
3089
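// Fixture that builds a model containing an OEM operation and, if possible,
// creates a compilation on a device that cannot run it, so that
// ANeuralNetworksCompilation_finish fails and leaves the compilation unusable.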
3090 class ValidationTestInvalidCompilation : public ValidationTestModel {
3091 protected:
SetUp()3092 virtual void SetUp() override {
3093 ValidationTestModel::SetUp();
3094
3095 // Create a model with an OEM operation
3096 uint32_t dimensions[]{1};
3097 ANeuralNetworksOperandType OEMTensorType{.type = ANEURALNETWORKS_TENSOR_OEM_BYTE,
3098 .dimensionCount = 1,
3099 .dimensions = dimensions};
3100 EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMTensorType),
3101 ANEURALNETWORKS_NO_ERROR);
3102 EXPECT_EQ(ANeuralNetworksModel_addOperand(mModel, &OEMTensorType),
3103 ANEURALNETWORKS_NO_ERROR);
3104 ASSERT_EQ(addOperation(ANEURALNETWORKS_OEM_OPERATION, {0}, {1}), ANEURALNETWORKS_NO_ERROR);
3105 ASSERT_EQ(identifyInputsAndOutputs({0}, {1}), ANEURALNETWORKS_NO_ERROR);
3106 ASSERT_EQ(modelFinish(), ANEURALNETWORKS_NO_ERROR);
3107
3108 // Find a device that cannot handle OEM operation and create compilation on that
3109 uint32_t numDevices = 0;
3110 EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
3111 for (uint32_t i = 0; i < numDevices; i++) {
3112 ANeuralNetworksDevice* device;
3113 EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
3114 bool supported = false;
3115 EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(mModel, &device, 1,
3116 &supported),
3117 ANEURALNETWORKS_NO_ERROR);
3118 if (!supported) {
3119 ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel, &device, 1,
3120 &mInvalidCompilation),
3121 ANEURALNETWORKS_NO_ERROR);
3122 break;
3123 }
3124 }
3125 if (mInvalidCompilation) {
3126 ASSERT_EQ(ANeuralNetworksCompilation_finish(mInvalidCompilation),
3127 ANEURALNETWORKS_BAD_DATA);
3128 }
3129 }
3130
TearDown()3131 virtual void TearDown() {
3132 ANeuralNetworksCompilation_free(mInvalidCompilation);
3133 ValidationTestModel::TearDown();
3134 }
3135
3136 ANeuralNetworksCompilation* mInvalidCompilation = nullptr;
3137 };
3138
TEST_F(ValidationTestInvalidCompilation,GetPreferredMemoryAlignmentAndPadding)3139 TEST_F(ValidationTestInvalidCompilation, GetPreferredMemoryAlignmentAndPadding) {
3140 if (__builtin_available(android __NNAPI_FL5_MIN_ANDROID_API__, *)) {
3141 if (!mInvalidCompilation) {
3142 return;
3143 }
3144 uint32_t result;
3145 EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForInput(
3146 mInvalidCompilation, 0, &result),
3147 ANEURALNETWORKS_BAD_STATE);
3148 EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForInput(mInvalidCompilation,
3149 0, &result),
3150 ANEURALNETWORKS_BAD_STATE);
3151 EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryAlignmentForOutput(
3152 mInvalidCompilation, 0, &result),
3153 ANEURALNETWORKS_BAD_STATE);
3154 EXPECT_EQ(ANeuralNetworksCompilation_getPreferredMemoryPaddingForOutput(mInvalidCompilation,
3155 0, &result),
3156 ANEURALNETWORKS_BAD_STATE);
3157 } else {
3158 GTEST_SKIP();
3159 }
3160 }
3161
TEST_F(ValidationTestInvalidCompilation,CreateExecution)3162 TEST_F(ValidationTestInvalidCompilation, CreateExecution) {
3163 if (!mInvalidCompilation) {
3164 return;
3165 }
3166 ANeuralNetworksExecution* execution = nullptr;
3167 EXPECT_EQ(ANeuralNetworksExecution_create(mInvalidCompilation, &execution),
3168 ANEURALNETWORKS_BAD_STATE);
3169 ANeuralNetworksExecution_free(execution);
3170 }
3171
TEST_F(ValidationTestInvalidCompilation,MemoryDescAddRole)3172 TEST_F(ValidationTestInvalidCompilation, MemoryDescAddRole) {
3173 if (!mInvalidCompilation) {
3174 return;
3175 }
3176 ANeuralNetworksMemoryDesc* desc = nullptr;
3177 ASSERT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
3178 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, mInvalidCompilation, 0, 1.0f),
3179 ANEURALNETWORKS_BAD_DATA);
3180 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, mInvalidCompilation, 0, 1.0f),
3181 ANEURALNETWORKS_BAD_DATA);
3182 ANeuralNetworksMemoryDesc_free(desc);
3183 }
3184
3185 // Also see TEST_F(ValidationTestCompilation, ExecutionTiming)
3186 // Also see TEST_F(ValidationTestCompilationForDevices_2, ExecutionTiming)
3187 // Also see TEST_F(ValidationTestCompilation, ExecutionUsability)
TEST_F(ValidationTestCompilationForDevices_1,ExecutionTiming)3188 TEST_F(ValidationTestCompilationForDevices_1, ExecutionTiming) {
3189 if (!mCompilation) {
3190 return;
3191 }
3192 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3193
3194 enum class ExecutionType : uint32_t { ASYNC, SYNC, BURST, FENCED };
3195 for (auto executionType :
3196 {ExecutionType::ASYNC, ExecutionType::SYNC, ExecutionType::BURST, ExecutionType::FENCED}) {
3197 SCOPED_TRACE(static_cast<uint32_t>(executionType));
3198
3199 ANeuralNetworksExecution* execution;
3200 ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution),
3201 ANEURALNETWORKS_NO_ERROR);
3202
3203 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(nullptr, false),
3204 ANEURALNETWORKS_UNEXPECTED_NULL);
3205 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(nullptr, true),
3206 ANEURALNETWORKS_UNEXPECTED_NULL);
3207 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, false),
3208 ANEURALNETWORKS_NO_ERROR);
3209 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, true),
3210 ANEURALNETWORKS_NO_ERROR);
3211
3212 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];
3213 int in2 = 0;
3214 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),
3215 ANEURALNETWORKS_NO_ERROR);
3216 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)),
3217 ANEURALNETWORKS_NO_ERROR);
3218 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)),
3219 ANEURALNETWORKS_NO_ERROR);
3220 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)),
3221 ANEURALNETWORKS_NO_ERROR);
3222
3223 // Cannot getDuration until the execution has finished.
3224 uint64_t duration;
3225 EXPECT_EQ(ANeuralNetworksExecution_getDuration(
3226 execution, ANEURALNETWORKS_DURATION_ON_HARDWARE, &duration),
3227 ANEURALNETWORKS_BAD_STATE);
3228 EXPECT_EQ(ANeuralNetworksExecution_getDuration(
3229 execution, ANEURALNETWORKS_DURATION_IN_DRIVER, &duration),
3230 ANEURALNETWORKS_BAD_STATE);
3231
3232 auto testSetTimeoutTooLate = [execution] {
3233 // Cannot setTimeout if the execution has started.
3234 EXPECT_EQ(ANeuralNetworksExecution_setTimeout(execution, kShortWaitInNanoseconds),
3235 ANEURALNETWORKS_BAD_STATE);
3236 };
3237
3238 auto testMeasureTooLate = [execution] {
3239 // Cannot setMeasureTiming if the execution has started.
3240 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, false),
3241 ANEURALNETWORKS_BAD_STATE);
3242 EXPECT_EQ(ANeuralNetworksExecution_setMeasureTiming(execution, true),
3243 ANEURALNETWORKS_BAD_STATE);
3244 };
3245
3246 // Compute.
3247 switch (executionType) {
3248 case ExecutionType::ASYNC: {
3249 ANeuralNetworksEvent* event;
3250 ASSERT_EQ(ANeuralNetworksExecution_startCompute(execution, &event),
3251 ANEURALNETWORKS_NO_ERROR);
3252 testMeasureTooLate();
3253 ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR);
3254 testSetTimeoutTooLate();
3255 testMeasureTooLate();
3256 ANeuralNetworksEvent_free(event);
3257 break;
3258 }
3259 case ExecutionType::SYNC: {
3260 ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_NO_ERROR);
3261 testSetTimeoutTooLate();
3262 testMeasureTooLate();
3263 break;
3264 }
3265 case ExecutionType::BURST: {
3266 ANeuralNetworksBurst* burst;
3267 ASSERT_EQ(ANeuralNetworksBurst_create(mCompilation, &burst),
3268 ANEURALNETWORKS_NO_ERROR);
3269 ASSERT_EQ(ANeuralNetworksExecution_burstCompute(execution, burst),
3270 ANEURALNETWORKS_NO_ERROR);
3271 testSetTimeoutTooLate();
3272 testMeasureTooLate();
3273 ANeuralNetworksBurst_free(burst);
3274 break;
3275 }
3276 case ExecutionType::FENCED: {
3277 ANeuralNetworksEvent* event = nullptr;
3278 ASSERT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution, nullptr,
3279 0, 0, &event),
3280 ANEURALNETWORKS_NO_ERROR);
3281 testMeasureTooLate();
3282 ASSERT_EQ(ANeuralNetworksEvent_wait(event), ANEURALNETWORKS_NO_ERROR);
3283 testSetTimeoutTooLate();
3284 testMeasureTooLate();
3285 ANeuralNetworksEvent_free(event);
3286 break;
3287 }
3288 default:
3289 FAIL() << "Unreachable";
3290 }
3291
3292 auto testDuration = [](ANeuralNetworksExecution* e, int32_t durationCode,
3293 bool nullDuration) {
3294 SCOPED_TRACE(e);
3295 SCOPED_TRACE(durationCode);
3296 SCOPED_TRACE(nullDuration);
3297
3298 // Strictly speaking, a duration COULD have this value, but it is
3299 // exceedingly unlikely. We'll use it as an initial value that we expect
3300 // to be modified by getDuration().
3301 const uint64_t kBogusDuration = UINT64_MAX - 1;
3302
3303 uint64_t duration = kBogusDuration;
3304 uint64_t* durationPtr = nullDuration ? nullptr : &duration;
3305
3306 int expectedResultCode = ANEURALNETWORKS_NO_ERROR;
3307 if (e == nullptr || durationPtr == nullptr) {
3308 expectedResultCode = ANEURALNETWORKS_UNEXPECTED_NULL;
3309 } else if (durationCode < 0 ||
3310 durationCode > ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER) {
3311 expectedResultCode = ANEURALNETWORKS_BAD_DATA;
3312 }
3313
3314 EXPECT_EQ(ANeuralNetworksExecution_getDuration(e, durationCode, durationPtr),
3315 expectedResultCode);
3316 if (expectedResultCode == ANEURALNETWORKS_NO_ERROR) {
3317 EXPECT_NE(duration, kBogusDuration);
3318 }
3319 };
3320
3321 std::vector<ANeuralNetworksExecution*> executions = {nullptr, execution};
3322 std::vector<int32_t> durationCodes = {-1,
3323 ANEURALNETWORKS_DURATION_ON_HARDWARE,
3324 ANEURALNETWORKS_DURATION_IN_DRIVER,
3325 ANEURALNETWORKS_FENCED_DURATION_ON_HARDWARE,
3326 ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER,
3327 ANEURALNETWORKS_FENCED_DURATION_IN_DRIVER + 1};
3328 std::vector<bool> nullDurations = {false, true};
3329 for (auto e : executions) {
3330 for (auto d : durationCodes) {
3331 for (auto n : nullDurations) {
3332 testDuration(e, d, n);
3333 }
3334 }
3335 }
3336
3337 // free the execution
3338 ANeuralNetworksExecution_free(execution);
3339 }
3340 }
3341
3342 enum class TimeoutDurationType { SHORT, MAXIMUM };
createTimeoutDuration(TimeoutDurationType type)3343 uint64_t createTimeoutDuration(TimeoutDurationType type) {
3344 switch (type) {
3345 case TimeoutDurationType::SHORT:
3346 return kShortWaitInNanoseconds;
3347 case TimeoutDurationType::MAXIMUM:
3348 return std::numeric_limits<uint64_t>::max();
3349 }
3350 LOG(FATAL) << "Invalid TimeoutDurationType: " << static_cast<int>(type);
3351 return 0;
3352 }
3353
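// Finishes the given compilation and, for each execution path (async, sync,
// burst, fenced), sets the requested timeout and verifies that the completion
// status is consistent with that timeout duration.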
runExecutionSetTimeoutTest(ANeuralNetworksCompilation * compilation,TimeoutDurationType timeoutDurationType)3354 void runExecutionSetTimeoutTest(ANeuralNetworksCompilation* compilation,
3355 TimeoutDurationType timeoutDurationType) {
3356 if (!compilation) {
3357 return;
3358 }
3359 ASSERT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR);
3360
3361 enum class ExecutionType : uint32_t { ASYNC, SYNC, BURST, FENCED };
3362 for (auto executionType :
3363 {ExecutionType::ASYNC, ExecutionType::SYNC, ExecutionType::BURST, ExecutionType::FENCED}) {
3364 SCOPED_TRACE(static_cast<uint32_t>(executionType));
3365
3366 ANeuralNetworksExecution* execution;
3367 ASSERT_EQ(ANeuralNetworksExecution_create(compilation, &execution),
3368 ANEURALNETWORKS_NO_ERROR);
3369 const auto scoped = android::base::make_scope_guard(
3370 [execution] { ANeuralNetworksExecution_free(execution); });
3371
3372 float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];
3373 int in2 = 0;
3374 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),
3375 ANEURALNETWORKS_NO_ERROR);
3376 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)),
3377 ANEURALNETWORKS_NO_ERROR);
3378 ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)),
3379 ANEURALNETWORKS_NO_ERROR);
3380 ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)),
3381 ANEURALNETWORKS_NO_ERROR);
3382
3383 const uint64_t timeoutDuration = createTimeoutDuration(timeoutDurationType);
3384 EXPECT_EQ(ANeuralNetworksExecution_setTimeout(execution, timeoutDuration),
3385 ANEURALNETWORKS_NO_ERROR);
3386
3387 const auto checkResult = [timeoutDurationType](int n) {
3388 switch (timeoutDurationType) {
3389 case TimeoutDurationType::SHORT:
3390 EXPECT_TRUE(n == ANEURALNETWORKS_NO_ERROR ||
3391 n == ANEURALNETWORKS_MISSED_DEADLINE_TRANSIENT ||
3392 n == ANEURALNETWORKS_MISSED_DEADLINE_PERSISTENT);
3393 return;
3394 case TimeoutDurationType::MAXIMUM:
3395 EXPECT_EQ(n, ANEURALNETWORKS_NO_ERROR);
3396 return;
3397 }
3398 LOG(FATAL) << "Invalid TimeoutDurationType: " << static_cast<int>(timeoutDurationType);
3399 };
3400
3401 // Compute.
3402 switch (executionType) {
3403 case ExecutionType::ASYNC: {
3404 ANeuralNetworksEvent* event = nullptr;
3405 EXPECT_EQ(ANeuralNetworksExecution_startCompute(execution, &event),
3406 ANEURALNETWORKS_NO_ERROR);
3407 checkResult(ANeuralNetworksEvent_wait(event));
3408 ANeuralNetworksEvent_free(event);
3409 break;
3410 }
3411 case ExecutionType::SYNC: {
3412 checkResult(ANeuralNetworksExecution_compute(execution));
3413 break;
3414 }
3415 case ExecutionType::BURST: {
3416 ANeuralNetworksBurst* burst;
3417 ASSERT_EQ(ANeuralNetworksBurst_create(compilation, &burst),
3418 ANEURALNETWORKS_NO_ERROR);
3419 checkResult(ANeuralNetworksExecution_burstCompute(execution, burst));
3420 ANeuralNetworksBurst_free(burst);
3421 break;
3422 }
3423 case ExecutionType::FENCED: {
3424 ANeuralNetworksEvent* event = nullptr;
3425 EXPECT_EQ(ANeuralNetworksExecution_startComputeWithDependencies(execution, nullptr,
3426 0, 0, &event),
3427 ANEURALNETWORKS_NO_ERROR);
3428 checkResult(ANeuralNetworksEvent_wait(event));
3429 ANeuralNetworksEvent_free(event);
3430 break;
3431 }
3432 default:
3433 FAIL() << "Unreachable";
3434 }
3435 }
3436 }
3437
3438 // Also see TEST_F(ValidationTestCompilation, ExecutionSetTimeout)
3439 // Also see TEST_F(ValidationTestCompilationForDevices_2, ExecutionSetTimeout)
TEST_F(ValidationTestCompilationForDevices_1,ExecutionSetTimeout)3440 TEST_F(ValidationTestCompilationForDevices_1, ExecutionSetTimeout) {
3441 runExecutionSetTimeoutTest(mCompilation, TimeoutDurationType::SHORT);
3442 }
3443
TEST_F(ValidationTestCompilationForDevices_1,ExecutionSetTimeoutMaximum)3444 TEST_F(ValidationTestCompilationForDevices_1, ExecutionSetTimeoutMaximum) {
3445 runExecutionSetTimeoutTest(mCompilation, TimeoutDurationType::MAXIMUM);
3446 }
3447
TEST_F(ValidationTest,CreateMemoryDesc)3448 TEST_F(ValidationTest, CreateMemoryDesc) {
3449 EXPECT_EQ(ANeuralNetworksMemoryDesc_create(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
3450 }
3451
TEST_F(ValidationTestMemoryDesc,AddInputRole)3452 TEST_F(ValidationTestMemoryDesc, AddInputRole) {
3453 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(nullptr, mCompilation, 0, 1.0f),
3454 ANEURALNETWORKS_UNEXPECTED_NULL);
3455 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, nullptr, 0, 1.0f),
3456 ANEURALNETWORKS_UNEXPECTED_NULL);
3457
3458 // Unfinished compilation.
3459 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 1.0f),
3460 ANEURALNETWORKS_BAD_DATA);
3461
3462 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3463
3464 // Index out of range.
3465 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 999, 1.0f),
3466 ANEURALNETWORKS_BAD_DATA);
3467
3468 // Invalid frequency.
3469 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 10.0f),
3470 ANEURALNETWORKS_BAD_DATA);
3471 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 0.0f),
3472 ANEURALNETWORKS_BAD_DATA);
3473 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, -1.0f),
3474 ANEURALNETWORKS_BAD_DATA);
3475
3476 // Specify the same operand twice.
3477 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 1.0f),
3478 ANEURALNETWORKS_NO_ERROR);
3479 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 1.0f),
3480 ANEURALNETWORKS_BAD_DATA);
3481
3482 // Attempting to modify a finished descriptor.
3483 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(mDesc), ANEURALNETWORKS_NO_ERROR);
3484 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 1.0f),
3485 ANEURALNETWORKS_BAD_STATE);
3486 }
3487
TEST_F(ValidationTestMemoryDesc,AddOutputRole)3488 TEST_F(ValidationTestMemoryDesc, AddOutputRole) {
3489 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(nullptr, mCompilation, 0, 1.0f),
3490 ANEURALNETWORKS_UNEXPECTED_NULL);
3491 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, nullptr, 0, 1.0f),
3492 ANEURALNETWORKS_UNEXPECTED_NULL);
3493
3494 // Unfinished compilation.
3495 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, 1.0f),
3496 ANEURALNETWORKS_BAD_DATA);
3497
3498 ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3499
3500 // Index out of range.
3501 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 999, 1.0f),
3502 ANEURALNETWORKS_BAD_DATA);
3503
3504 // Invalid frequency.
3505 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, 10.0f),
3506 ANEURALNETWORKS_BAD_DATA);
3507 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, 0.0f),
3508 ANEURALNETWORKS_BAD_DATA);
3509 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, -1.0f),
3510 ANEURALNETWORKS_BAD_DATA);
3511
3512 // Specify the same operand twice.
3513 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, 1.0f),
3514 ANEURALNETWORKS_NO_ERROR);
3515 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, 1.0f),
3516 ANEURALNETWORKS_BAD_DATA);
3517
3518 // Attempting to modify a finished descriptor.
3519 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(mDesc), ANEURALNETWORKS_NO_ERROR);
3520 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(mDesc, mCompilation, 0, 1.0f),
3521 ANEURALNETWORKS_BAD_STATE);
3522 }
3523
3524 // Creates and compiles a single-operation ADD model with the given operand type.
3525 // The caller is responsible for freeing the returned model and compilation.
3526 static std::pair<ANeuralNetworksModel*, ANeuralNetworksCompilation*>
createAndCompileAddModelWithType(const ANeuralNetworksOperandType & type)3527 createAndCompileAddModelWithType(const ANeuralNetworksOperandType& type) {
3528 // OperandType for activation scalar.
3529 const ANeuralNetworksOperandType actType = {
3530 .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
3531
3532 ANeuralNetworksModel* model;
3533 EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
3534 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &type), ANEURALNETWORKS_NO_ERROR);
3535 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &type), ANEURALNETWORKS_NO_ERROR);
3536 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &actType), ANEURALNETWORKS_NO_ERROR);
3537 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &type), ANEURALNETWORKS_NO_ERROR);
3538
3539 const uint32_t inList[] = {0, 1, 2};
3540 const uint32_t outList[] = {3};
3541 EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_ADD, 3, inList, 1, outList),
3542 ANEURALNETWORKS_NO_ERROR);
3543 EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, 3, inList, 1, outList),
3544 ANEURALNETWORKS_NO_ERROR);
3545 EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
3546
3547 ANeuralNetworksCompilation* compilation;
3548 EXPECT_EQ(ANeuralNetworksCompilation_create(model, &compilation), ANEURALNETWORKS_NO_ERROR);
3549 EXPECT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR);
3550 return {model, compilation};
3551 }
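// Illustrative usage (a sketch of how the tests below consume this helper; the
// float32 operand type here is just an example):
//   const uint32_t dims[] = {2};
//   const ANeuralNetworksOperandType type = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
//                                            .dimensionCount = 1,
//                                            .dimensions = dims};
//   const auto [model, compilation] = createAndCompileAddModelWithType(type);
//   // ... use the compilation, e.g. with ANeuralNetworksMemoryDesc_addInputRole ...
//   ANeuralNetworksCompilation_free(compilation);
//   ANeuralNetworksModel_free(model);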
3552
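// Checks that, once a role from `compilation` has been added to a memory
// descriptor, adding an input or output role from a compilation built with the
// conflicting `badType` is rejected with ANEURALNETWORKS_BAD_DATA.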
testIncompatibleOperands(const ANeuralNetworksCompilation * compilation,const ANeuralNetworksOperandType & badType)3553 static void testIncompatibleOperands(const ANeuralNetworksCompilation* compilation,
3554 const ANeuralNetworksOperandType& badType) {
3555 const auto [badModel, badCompilation] = createAndCompileAddModelWithType(badType);
3556 {
3557 ANeuralNetworksMemoryDesc* desc = nullptr;
3558 EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
3559 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, compilation, 0, 1.0f),
3560 ANEURALNETWORKS_NO_ERROR);
3561 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, badCompilation, 0, 1.0f),
3562 ANEURALNETWORKS_BAD_DATA);
3563 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, badCompilation, 0, 1.0f),
3564 ANEURALNETWORKS_BAD_DATA);
3565 ANeuralNetworksMemoryDesc_free(desc);
3566 }
3567 {
3568 ANeuralNetworksMemoryDesc* desc = nullptr;
3569 EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
3570 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, compilation, 0, 1.0f),
3571 ANEURALNETWORKS_NO_ERROR);
3572 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, badCompilation, 0, 1.0f),
3573 ANEURALNETWORKS_BAD_DATA);
3574 EXPECT_EQ(ANeuralNetworksMemoryDesc_addOutputRole(desc, badCompilation, 0, 1.0f),
3575 ANEURALNETWORKS_BAD_DATA);
3576 ANeuralNetworksMemoryDesc_free(desc);
3577 }
3578 ANeuralNetworksCompilation_free(badCompilation);
3579 ANeuralNetworksModel_free(badModel);
3580 }
3581
TEST_F(ValidationTestMemoryDesc,OperandMetadata)3582 TEST_F(ValidationTestMemoryDesc, OperandMetadata) {
3583 const uint32_t dimensions[] = {2};
3584 const uint32_t rank = std::size(dimensions);
3585 const ANeuralNetworksOperandType floatBase = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
3586 .dimensionCount = rank,
3587 .dimensions = dimensions,
3588 .scale = 0.0f,
3589 .zeroPoint = 0};
3590 const ANeuralNetworksOperandType quantBase = {.type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3591 .dimensionCount = rank,
3592 .dimensions = dimensions,
3593 .scale = 1.0f,
3594 .zeroPoint = 0};
3595 const auto [floatModel, floatCompilation] = createAndCompileAddModelWithType(floatBase);
3596 const auto [quantModel, quantCompilation] = createAndCompileAddModelWithType(quantBase);
3597
3598 // Different data type.
3599 {
3600 SCOPED_TRACE("Data type");
3601 ANeuralNetworksOperandType wrongType = floatBase;
3602 wrongType.type = ANEURALNETWORKS_TENSOR_FLOAT16;
3603 testIncompatibleOperands(floatCompilation, wrongType);
3604 }
3605
3606 // Different scale.
3607 {
3608 SCOPED_TRACE("Scale");
3609 ANeuralNetworksOperandType wrongScale = quantBase;
3610 wrongScale.scale = 0.5f;
3611 testIncompatibleOperands(quantCompilation, wrongScale);
3612 }
3613
3614 // Different zero point.
3615 {
3616 SCOPED_TRACE("Zero point");
3617 ANeuralNetworksOperandType wrongZeroPoint = quantBase;
3618 wrongZeroPoint.zeroPoint = 128;
3619 testIncompatibleOperands(quantCompilation, wrongZeroPoint);
3620 }
3621
3622 // Different rank.
3623 {
3624 SCOPED_TRACE("Rank");
3625 const uint32_t badDimensions[] = {2, 1};
3626 const uint32_t badRank = std::size(badDimensions);
3627 ANeuralNetworksOperandType wrongRank = quantBase;
3628 wrongRank.dimensionCount = badRank;
3629 wrongRank.dimensions = badDimensions;
3630 testIncompatibleOperands(quantCompilation, wrongRank);
3631 }
3632
3633 // Different dimensions.
3634 {
3635 SCOPED_TRACE("Dimensions");
3636 const uint32_t badDimensions[] = {1};
3637 ANeuralNetworksOperandType wrongDims = quantBase;
3638 wrongDims.dimensions = badDimensions;
3639 testIncompatibleOperands(quantCompilation, wrongDims);
3640 }
3641
3642 ANeuralNetworksCompilation_free(floatCompilation);
3643 ANeuralNetworksCompilation_free(quantCompilation);
3644 ANeuralNetworksModel_free(floatModel);
3645 ANeuralNetworksModel_free(quantModel);
3646 }
3647
3648 // Creates and compiles a single-operation CONV_2D model whose filter uses per-channel
3649 // quantization with the given scales. The caller is responsible for freeing the returned model and compilation.
3650 static std::pair<ANeuralNetworksModel*, ANeuralNetworksCompilation*>
createAndCompileChannelQuantConvModel(const std::vector<float> & scales)3651 createAndCompileChannelQuantConvModel(const std::vector<float>& scales) {
3652 const uint32_t numChannels = scales.size();
3653
3654 // OperandType for input and output.
3655 const uint32_t inoutDimensions[] = {1, 16, 16, numChannels};
3656 const ANeuralNetworksOperandType inoutType = {
3657 .type = ANEURALNETWORKS_TENSOR_QUANT8_ASYMM,
3658 .dimensionCount = std::size(inoutDimensions),
3659 .dimensions = inoutDimensions,
3660 .scale = 1.0f,
3661 .zeroPoint = 0,
3662 };
3663
3664 // OperandType for filter.
3665 const uint32_t filterDimensions[] = {numChannels, 3, 3, numChannels};
3666 const ANeuralNetworksOperandType filterType = {
3667 .type = ANEURALNETWORKS_TENSOR_QUANT8_SYMM_PER_CHANNEL,
3668 .dimensionCount = std::size(filterDimensions),
3669 .dimensions = filterDimensions,
3670 .scale = 0.0f,
3671 .zeroPoint = 0,
3672 };
3673
3674 // OperandType for bias.
3675 const uint32_t biasDimensions[] = {numChannels};
3676 const ANeuralNetworksOperandType biasType = {
3677 .type = ANEURALNETWORKS_TENSOR_INT32,
3678 .dimensionCount = std::size(biasDimensions),
3679 .dimensions = biasDimensions,
3680 .scale = 0.0f,
3681 .zeroPoint = 0,
3682 };
3683
3684 // OperandType for scalars: implicit padding code, strides, activation.
3685 const ANeuralNetworksOperandType scalarType = {
3686 .type = ANEURALNETWORKS_INT32, .dimensionCount = 0, .dimensions = nullptr};
3687
3688 ANeuralNetworksModel* model;
3689 EXPECT_EQ(ANeuralNetworksModel_create(&model), ANEURALNETWORKS_NO_ERROR);
3690 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &inoutType), ANEURALNETWORKS_NO_ERROR);
3691 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &filterType), ANEURALNETWORKS_NO_ERROR);
3692 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &biasType), ANEURALNETWORKS_NO_ERROR);
3693 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &scalarType), ANEURALNETWORKS_NO_ERROR);
3694 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &scalarType), ANEURALNETWORKS_NO_ERROR);
3695 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &scalarType), ANEURALNETWORKS_NO_ERROR);
3696 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &scalarType), ANEURALNETWORKS_NO_ERROR);
3697 EXPECT_EQ(ANeuralNetworksModel_addOperand(model, &inoutType), ANEURALNETWORKS_NO_ERROR);
3698
3699 // Set channel quant parameters for the filter tensor.
3700 const ANeuralNetworksSymmPerChannelQuantParams channelQuant = {
3701 .channelDim = 0,
3702 .scaleCount = numChannels,
3703 .scales = scales.data(),
3704 };
3705 EXPECT_EQ(ANeuralNetworksModel_setOperandSymmPerChannelQuantParams(model, 1, &channelQuant),
3706 ANEURALNETWORKS_NO_ERROR);
3707
3708 const uint32_t inList[] = {0, 1, 2, 3, 4, 5, 6};
3709 const uint32_t outList[] = {7};
3710 EXPECT_EQ(ANeuralNetworksModel_addOperation(model, ANEURALNETWORKS_CONV_2D, std::size(inList),
3711 inList, std::size(outList), outList),
3712 ANEURALNETWORKS_NO_ERROR);
3713 EXPECT_EQ(ANeuralNetworksModel_identifyInputsAndOutputs(model, std::size(inList), inList,
3714 std::size(outList), outList),
3715 ANEURALNETWORKS_NO_ERROR);
3716 EXPECT_EQ(ANeuralNetworksModel_finish(model), ANEURALNETWORKS_NO_ERROR);
3717
3718 ANeuralNetworksCompilation* compilation;
3719 EXPECT_EQ(ANeuralNetworksCompilation_create(model, &compilation), ANEURALNETWORKS_NO_ERROR);
3720 EXPECT_EQ(ANeuralNetworksCompilation_finish(compilation), ANEURALNETWORKS_NO_ERROR);
3721 return {model, compilation};
3722 }
3723
TEST_F(ValidationTestMemoryDesc,ExtraParams)3724 TEST_F(ValidationTestMemoryDesc, ExtraParams) {
3725 // Create two compilations with conflicting channel quant scales.
3726 const auto [model1, compilation1] = createAndCompileChannelQuantConvModel({1.0f, 1.0f});
3727 const auto [model2, compilation2] = createAndCompileChannelQuantConvModel({0.5f, 0.5f});
3728
3729 ANeuralNetworksMemoryDesc* desc = nullptr;
3730 EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
3731 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, compilation1, 1, 1.0f),
3732 ANEURALNETWORKS_NO_ERROR);
3733 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, compilation2, 1, 1.0f),
3734 ANEURALNETWORKS_BAD_DATA);
3735 ANeuralNetworksMemoryDesc_free(desc);
3736
3737 ANeuralNetworksCompilation_free(compilation1);
3738 ANeuralNetworksCompilation_free(compilation2);
3739 ANeuralNetworksModel_free(model1);
3740 ANeuralNetworksModel_free(model2);
3741 }
3742
TEST_F(ValidationTestMemoryDesc,SetDimensions)3743 TEST_F(ValidationTestMemoryDesc, SetDimensions) {
3744 const uint32_t dimensions[] = {2};
3745 const uint32_t badDimensions[] = {3};
3746 const uint32_t rank = std::size(dimensions);
3747 const uint32_t badRankDimensions[] = {2, 1};
3748 const uint32_t badRank = std::size(badRankDimensions);
3749
3750 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(nullptr, rank, dimensions),
3751 ANEURALNETWORKS_UNEXPECTED_NULL);
3752 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, rank, nullptr),
3753 ANEURALNETWORKS_UNEXPECTED_NULL);
3754
3755 // Incompatible dimensions.
3756 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, rank, dimensions),
3757 ANEURALNETWORKS_NO_ERROR);
3758 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, rank, badDimensions),
3759 ANEURALNETWORKS_BAD_DATA);
3760 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, badRank, badRankDimensions),
3761 ANEURALNETWORKS_BAD_DATA);
3762
3763 // Attempting to modify a finished descriptor.
3764 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3765 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 1.0f),
3766 ANEURALNETWORKS_NO_ERROR);
3767 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(mDesc), ANEURALNETWORKS_NO_ERROR);
3768 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, rank, dimensions),
3769 ANEURALNETWORKS_BAD_STATE);
3770 }
3771
TEST_F(ValidationTestMemoryDesc,SetScalarDimensionsBeforeAddRole)3772 TEST_F(ValidationTestMemoryDesc, SetScalarDimensionsBeforeAddRole) {
3773 const uint32_t badDimensions[] = {2};
3774 const uint32_t badRank = std::size(badDimensions);
3775
3776 // Set non-zero rank.
3777 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, badRank, badDimensions),
3778 ANEURALNETWORKS_NO_ERROR);
3779
3780 // This should fail because input2 is a scalar.
3781 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3782 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 2, 1.0f),
3783 ANEURALNETWORKS_BAD_DATA);
3784 }
3785
TEST_F(ValidationTestMemoryDesc,SetScalarDimensionsAfterAddRole)3786 TEST_F(ValidationTestMemoryDesc, SetScalarDimensionsAfterAddRole) {
3787 const uint32_t badDimensions[] = {2};
3788 const uint32_t badRank = std::size(badDimensions);
3789
3790 // Input2 is a scalar.
3791 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3792 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 2, 1.0f),
3793 ANEURALNETWORKS_NO_ERROR);
3794
3795 // Setting rank 0 matches the scalar role; setting a non-zero rank should fail.
3796 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, 0, nullptr), ANEURALNETWORKS_NO_ERROR);
3797 EXPECT_EQ(ANeuralNetworksMemoryDesc_setDimensions(mDesc, badRank, badDimensions),
3798 ANEURALNETWORKS_BAD_DATA);
3799 }
3800
TEST_F(ValidationTestMemoryDesc,Finish)3801 TEST_F(ValidationTestMemoryDesc, Finish) {
3802 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(nullptr), ANEURALNETWORKS_UNEXPECTED_NULL);
3803
3804 // No usage is specified.
3805 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(mDesc), ANEURALNETWORKS_BAD_DATA);
3806
3807 // Finish an already finished descriptor.
3808 EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
3809 EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(mDesc, mCompilation, 0, 1.0f),
3810 ANEURALNETWORKS_NO_ERROR);
3811 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(mDesc), ANEURALNETWORKS_NO_ERROR);
3812 EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(mDesc), ANEURALNETWORKS_BAD_STATE);
3813 }
3814
TEST_F(ValidationTestMemoryDesc,CreateMemory)3815 TEST_F(ValidationTestMemoryDesc, CreateMemory) {
3816 ANeuralNetworksMemory* memory = nullptr;
3817 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(nullptr, &memory),
3818 ANEURALNETWORKS_UNEXPECTED_NULL);
3819 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(mDesc, nullptr),
3820 ANEURALNETWORKS_UNEXPECTED_NULL);
3821
3822 // Unfinished descriptor.
3823 EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(mDesc, &memory), ANEURALNETWORKS_BAD_STATE);
3824
3825 ANeuralNetworksMemory_free(memory);
3826 }
3827
TEST(ValidationTestMemory,CreateFromFd)3828 TEST(ValidationTestMemory, CreateFromFd) {
3829 const size_t memorySize = 20;
3830 #ifdef __ANDROID__
3831 int memoryFd = ASharedMemory_create("nnMemory", memorySize);
3832 #else // __ANDROID__
3833 TemporaryFile tmpFile;
3834 int memoryFd = tmpFile.release();
3835 CHECK_EQ(ftruncate(memoryFd, memorySize), 0);
3836 #endif // __ANDROID__
3837 ASSERT_GT(memoryFd, 0);
3838
3839 EXPECT_EQ(ANeuralNetworksMemory_createFromFd(memorySize, PROT_READ | PROT_WRITE, memoryFd, 0,
3840 nullptr),
3841 ANEURALNETWORKS_UNEXPECTED_NULL);
3842
3843 close(memoryFd);
3844 }
3845
3846 #ifdef __ANDROID__
TEST(ValidationTestMemory,CreateFromAHardwareBuffer)3847 TEST(ValidationTestMemory, CreateFromAHardwareBuffer) {
3848 const size_t memorySize = 20;
3849 AHardwareBuffer_Desc desc{
3850 .width = memorySize,
3851 .height = 1,
3852 .layers = 1,
3853 .format = AHARDWAREBUFFER_FORMAT_BLOB,
3854 .usage = AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN | AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN,
3855 };
3856 AHardwareBuffer* buffer = nullptr;
3857 ASSERT_EQ(AHardwareBuffer_allocate(&desc, &buffer), 0);
3858 EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(buffer, nullptr),
3859 ANEURALNETWORKS_UNEXPECTED_NULL);
3860 AHardwareBuffer_release(buffer);
3861
3862 ANeuralNetworksMemory* memory = nullptr;
3863 EXPECT_EQ(ANeuralNetworksMemory_createFromAHardwareBuffer(nullptr, &memory),
3864 ANEURALNETWORKS_UNEXPECTED_NULL);
3865 }
3866 #endif // __ANDROID__
3867
TEST_F(ValidationTestMemoryDesc, MemoryCopying) {
    uint32_t goodSize = sizeof(float) * 2, badSize1 = sizeof(float), badSize2 = sizeof(float) * 4;
    ANeuralNetworksMemory* goodAshmem = createAshmem(goodSize);
    ANeuralNetworksMemory* badAshmem1 = createAshmem(badSize1);
    ANeuralNetworksMemory* badAshmem2 = createAshmem(badSize2);

    const uint32_t goodDimensions[] = {1, 2};
    const uint32_t badDimensions1[] = {2};
    const uint32_t badDimensions2[] = {2, 1};
    const ANeuralNetworksOperandType goodType = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                                 .dimensionCount = std::size(goodDimensions),
                                                 .dimensions = goodDimensions,
                                                 .scale = 0.0f,
                                                 .zeroPoint = 0};
    const ANeuralNetworksOperandType badType1 = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                                 .dimensionCount = std::size(badDimensions1),
                                                 .dimensions = badDimensions1,
                                                 .scale = 0.0f,
                                                 .zeroPoint = 0};
    const ANeuralNetworksOperandType badType2 = {.type = ANEURALNETWORKS_TENSOR_FLOAT32,
                                                 .dimensionCount = std::size(badDimensions2),
                                                 .dimensions = badDimensions2,
                                                 .scale = 0.0f,
                                                 .zeroPoint = 0};
    const auto [goodModel, goodCompilation] = createAndCompileAddModelWithType(goodType);
    const auto [badModel1, badCompilation1] = createAndCompileAddModelWithType(badType1);
    const auto [badModel2, badCompilation2] = createAndCompileAddModelWithType(badType2);

    ANeuralNetworksMemoryDesc* desc = nullptr;
    ANeuralNetworksMemory *goodDeviceMemory1 = nullptr, *goodDeviceMemory2 = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, goodCompilation, 0, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &goodDeviceMemory1),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &goodDeviceMemory2),
              ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);

    ANeuralNetworksMemory* badDeviceMemory1 = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, badCompilation1, 0, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &badDeviceMemory1),
              ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);

    ANeuralNetworksMemory* badDeviceMemory2 = nullptr;
    EXPECT_EQ(ANeuralNetworksMemoryDesc_create(&desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_addInputRole(desc, badCompilation2, 0, 1.0f),
              ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemoryDesc_finish(desc), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_createFromDesc(desc, &badDeviceMemory2),
              ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksMemoryDesc_free(desc);

    EXPECT_EQ(ANeuralNetworksMemory_copy(nullptr, goodDeviceMemory1),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory1, nullptr),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    // Ashmem -> Ashmem
    // Bad memory size.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, badAshmem1), ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, badAshmem2), ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksMemory_copy(badAshmem1, goodAshmem), ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksMemory_copy(badAshmem2, goodAshmem), ANEURALNETWORKS_BAD_DATA);

    // Ashmem -> Device Memory
    // Bad memory size.
    EXPECT_EQ(ANeuralNetworksMemory_copy(badAshmem1, goodDeviceMemory1), ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksMemory_copy(badAshmem2, goodDeviceMemory1), ANEURALNETWORKS_BAD_DATA);

    // Device Memory -> Ashmem
    // Uninitialized source device memory.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory1, goodAshmem), ANEURALNETWORKS_BAD_DATA);
    // Bad memory size.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, goodDeviceMemory1), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory1, badAshmem1), ANEURALNETWORKS_BAD_DATA);
    // Uninitialized source device memory (after a failed copy).
    EXPECT_EQ(ANeuralNetworksMemory_copy(badAshmem1, goodDeviceMemory1), ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory1, goodAshmem), ANEURALNETWORKS_BAD_DATA);
    // Bad memory size.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, goodDeviceMemory1), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory1, badAshmem2), ANEURALNETWORKS_BAD_DATA);

    // Device Memory -> Device Memory
    // Uninitialized source device memory.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory2, goodDeviceMemory1),
              ANEURALNETWORKS_BAD_DATA);
    // Incompatible rank.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, badDeviceMemory1), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_copy(badDeviceMemory1, goodDeviceMemory1),
              ANEURALNETWORKS_BAD_DATA);
    // Incompatible dimensions.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, badDeviceMemory2), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_copy(badDeviceMemory2, goodDeviceMemory1),
              ANEURALNETWORKS_BAD_DATA);
    // Deinitialized source device memory.
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodAshmem, goodDeviceMemory2), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksMemory_copy(badAshmem1, goodDeviceMemory2), ANEURALNETWORKS_BAD_DATA);
    EXPECT_EQ(ANeuralNetworksMemory_copy(goodDeviceMemory2, goodDeviceMemory1),
              ANEURALNETWORKS_BAD_DATA);

    ANeuralNetworksMemory_free(goodDeviceMemory1);
    ANeuralNetworksMemory_free(goodDeviceMemory2);
    ANeuralNetworksMemory_free(badDeviceMemory1);
    ANeuralNetworksMemory_free(badDeviceMemory2);
    ANeuralNetworksCompilation_free(goodCompilation);
    ANeuralNetworksCompilation_free(badCompilation1);
    ANeuralNetworksCompilation_free(badCompilation2);
    ANeuralNetworksModel_free(goodModel);
    ANeuralNetworksModel_free(badModel1);
    ANeuralNetworksModel_free(badModel2);
}

#ifndef NNTEST_ONLY_PUBLIC_API
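// Verifies the argument validation of ANeuralNetworksDevice_getExtensionSupport against every
// available device.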
TEST(ValidationTestDevice, GetExtensionSupport) {
    bool result;
    EXPECT_EQ(ANeuralNetworksDevice_getExtensionSupport(nullptr, kTestExtensionName, &result),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    uint32_t numDevices = 0;
    EXPECT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);

    for (uint32_t i = 0; i < numDevices; i++) {
        SCOPED_TRACE(i);
        ANeuralNetworksDevice* device;
        EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
        EXPECT_EQ(ANeuralNetworksDevice_getExtensionSupport(device, kTestExtensionName, nullptr),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksDevice_getExtensionSupport(device, nullptr, &result),
                  ANEURALNETWORKS_UNEXPECTED_NULL);
        EXPECT_EQ(ANeuralNetworksDevice_getExtensionSupport(device, kTestExtensionName, &result),
                  ANEURALNETWORKS_NO_ERROR);
    }
}

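// Extension attribute constants shared by the AddExtensionAttribute tests below.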
constexpr const char* kTestAttributeExtensionName = "com.android.test_attribute_extension";
const uint16_t kAttributeCode = 0;
const uint16_t kAttributeCode2 = 2;
const uint8_t kAttributeValue = 0;

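// Test fixture that registers kTestAttributeExtensionName with the TypeManager in SetUp() and
// resets the TypeManager in TearDown().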
class ValidationTestCompilationExtension : public ValidationTestCompilation {
   protected:
    virtual void SetUp() {
        ValidationTestCompilation::SetUp();
        EXPECT_TRUE(::android::nn::TypeManager::get()->forTest_registerExtension({
                .name = kTestAttributeExtensionName,
                .operandTypes = {},
        }));
    }

    virtual void TearDown() {
        ::android::nn::TypeManager::get()->forTest_reset();
        ValidationTestCompilation::TearDown();
    }
};

// Also see TEST_F(ValidationTestCompilationExtensionForDevices_1, AddExtensionAttribute)
// Also see TEST_F(ValidationTestCompilationExtensionForDevices_2, AddExtensionAttribute)
TEST_F(ValidationTestCompilationExtension, AddExtensionAttribute) {
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(nullptr, kTestAttributeExtensionName,
                                                               kAttributeCode, &kAttributeValue,
                                                               sizeof(uint8_t)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, nullptr, kAttributeCode, &kAttributeValue, sizeof(uint8_t)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode, nullptr,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    // ExtensionAttribute can only be added to Compilations created from CompilationForDevices with
    // one device specified.
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode, &kAttributeValue,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);
}

// Also see TEST_F(ValidationTestCompilationExtensionForDevices_1, ExecutionAddExtensionAttribute)
// Also see TEST_F(ValidationTestCompilationExtensionForDevices_2, ExecutionAddExtensionAttribute)
TEST_F(ValidationTestCompilationExtension, ExecutionAddExtensionAttribute) {
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(nullptr, kTestAttributeExtensionName,
                                                             kAttributeCode, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_UNEXPECTED_NULL);

    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksExecution* execution;
    ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, nullptr, kAttributeCode,
                                                             &kAttributeValue, sizeof(uint8_t)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode, nullptr,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_UNEXPECTED_NULL);
    // ExtensionAttribute can only be added to Executions created from CompilationForDevices with
    // one device specified.
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);
    ANeuralNetworksExecution_free(execution);
}

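// Like ValidationTestCompilationExtension, but built on ValidationTestCompilationForDevices_1,
// whose compilation targets a single device, so adding extension attributes is expected to
// succeed.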
class ValidationTestCompilationExtensionForDevices_1
    : public ValidationTestCompilationForDevices_1 {
   protected:
    virtual void SetUp() {
        ValidationTestCompilationForDevices_1::SetUp();
        EXPECT_TRUE(::android::nn::TypeManager::get()->forTest_registerExtension({
                .name = kTestAttributeExtensionName,
                .operandTypes = {},
        }));
    }

    virtual void TearDown() {
        ::android::nn::TypeManager::get()->forTest_reset();
        ValidationTestCompilationForDevices_1::TearDown();
    }
};

// Also see TEST_F(ValidationTestCompilationExtension, AddExtensionAttribute)
// Also see TEST_F(ValidationTestCompilationExtensionForDevices_2, AddExtensionAttribute)
TEST_F(ValidationTestCompilationExtensionForDevices_1, AddExtensionAttribute) {
    if (!mCompilation) {
        return;
    }
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode, &kAttributeValue,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_NO_ERROR);
    // Adding another attribute with a different code is allowed.
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode2, &kAttributeValue,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_NO_ERROR);
    // Adding the same attribute twice is illegal.
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode, &kAttributeValue,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);
    // Finishing the compilation should succeed.
    EXPECT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);

    // Adding an attribute after the compilation is finished is illegal.
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode, &kAttributeValue,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_STATE);
}

// Also see TEST_F(ValidationTestCompilationExtension, ExecutionAddExtensionAttribute)
// Also see TEST_F(ValidationTestCompilationExtensionForDevices_2, ExecutionAddExtensionAttribute)
TEST_F(ValidationTestCompilationExtensionForDevices_1, ExecutionAddExtensionAttribute) {
    if (!mCompilation) {
        return;
    }
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksExecution* execution;
    ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_NO_ERROR);
    // Adding another attribute with a different code is allowed.
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode2, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_NO_ERROR);
    // Adding the same attribute twice is illegal.
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);

    // Set the inputs and outputs and run the execution.
    float in0[] = {0.0f, 0.0f}, in1[] = {1.0f, 1.0f}, out0[2];
    int in2 = 0;
    ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 0, nullptr, &in0, sizeof(in0)),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 1, nullptr, &in1, sizeof(in1)),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksExecution_setInput(execution, 2, nullptr, &in2, sizeof(in2)),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksExecution_setOutput(execution, 0, nullptr, &out0, sizeof(out0)),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksExecution_compute(execution), ANEURALNETWORKS_NO_ERROR);

    // Adding an attribute after the execution has completed is illegal.
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_STATE);

    ANeuralNetworksExecution_free(execution);
}

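// Like ValidationTestCompilationExtension, but built on ValidationTestCompilationForDevices_2,
// whose compilation targets more than one device, so adding extension attributes is expected to
// be rejected.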
class ValidationTestCompilationExtensionForDevices_2
    : public ValidationTestCompilationForDevices_2 {
   protected:
    virtual void SetUp() {
        ValidationTestCompilationForDevices_2::SetUp();
        EXPECT_TRUE(::android::nn::TypeManager::get()->forTest_registerExtension({
                .name = kTestAttributeExtensionName,
                .operandTypes = {},
        }));
    }

    virtual void TearDown() {
        ::android::nn::TypeManager::get()->forTest_reset();
        ValidationTestCompilationForDevices_2::TearDown();
    }
};

// Also see TEST_F(ValidationTestCompilationExtension, AddExtensionAttribute)
// Also see TEST_F(ValidationTestCompilationExtensionForDevices_1, AddExtensionAttribute)
TEST_F(ValidationTestCompilationExtensionForDevices_2, AddExtensionAttribute) {
    if (!mCompilation) {
        return;
    }
    // ExtensionAttribute can only be added to Compilations created from CompilationForDevices with
    // one device specified.
    EXPECT_EQ(ANeuralNetworksCompilation_addExtensionAttribute(
                      mCompilation, kTestAttributeExtensionName, kAttributeCode, &kAttributeValue,
                      sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);
}

// Also see TEST_F(ValidationTestCompilationExtension, ExecutionAddExtensionAttribute)
// Also see TEST_F(ValidationTestCompilationExtensionForDevices_1, ExecutionAddExtensionAttribute)
TEST_F(ValidationTestCompilationExtensionForDevices_2, ExecutionAddExtensionAttribute) {
    if (!mCompilation) {
        return;
    }
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
    ANeuralNetworksExecution* execution;
    ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &execution), ANEURALNETWORKS_NO_ERROR);

    // ExtensionAttribute can only be added to Executions created from CompilationForDevices with
    // one device specified.
    EXPECT_EQ(ANeuralNetworksExecution_addExtensionAttribute(execution, kTestAttributeExtensionName,
                                                             kAttributeCode, &kAttributeValue,
                                                             sizeof(uint8_t)),
              ANEURALNETWORKS_BAD_DATA);
    ANeuralNetworksExecution_free(execution);
}
#endif  // NNTEST_ONLY_PUBLIC_API

}  // namespace