/*
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "HalInterfaces.h"
#include "Manager.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksExtensions.h"
#include "NeuralNetworksWrapperExtensions.h"
#include "TestNeuralNetworksWrapper.h"
#include "TypeManager.h"
#include "Utils.h"
#include "ValidateHal.h"

#include <gtest/gtest.h>

#include "FibonacciDriver.h"
#include "FibonacciExtension.h"

namespace android {
namespace nn {
namespace {

using ::android::nn::test_wrapper::ExtensionModel;
using ::android::nn::test_wrapper::ExtensionOperandParams;
using ::android::nn::test_wrapper::ExtensionOperandType;
using ::android::nn::test_wrapper::Type;

class FibonacciExtensionTest : public ::testing::Test {
   protected:
    virtual void SetUp() {
        if (DeviceManager::get()->getUseCpuOnly()) {
            // This test requires the use of a custom driver.
            GTEST_SKIP();
        }

        // Real world extension tests should run against actual hardware
        // implementations, but there is no hardware supporting the test
        // extension. Hence the sample software driver.
        DeviceManager::get()->forTest_registerDevice(sample_driver::FibonacciDriver::kDriverName,
                                                     new sample_driver::FibonacciDriver());
        // Discover extensions provided by registered devices.
        TypeManager::get()->forTest_reset();

        uint32_t numDevices = 0;
        ASSERT_EQ(ANeuralNetworks_getDeviceCount(&numDevices), ANEURALNETWORKS_NO_ERROR);
        ANeuralNetworksDevice* fibonacciDevice = nullptr;
        ANeuralNetworksDevice* cpuDevice = nullptr;
        for (uint32_t i = 0; i < numDevices; i++) {
            ANeuralNetworksDevice* device = nullptr;
            EXPECT_EQ(ANeuralNetworks_getDevice(i, &device), ANEURALNETWORKS_NO_ERROR);
            bool supportsFibonacciExtension;
            ASSERT_EQ(ANeuralNetworksDevice_getExtensionSupport(
                              device, TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                              &supportsFibonacciExtension),
                      ANEURALNETWORKS_NO_ERROR);
            if (supportsFibonacciExtension) {
                ASSERT_EQ(fibonacciDevice, nullptr) << "Found multiple Fibonacci drivers";
                fibonacciDevice = device;
            } else if (DeviceManager::get()->forTest_isCpuDevice(device)) {
                ASSERT_EQ(cpuDevice, nullptr) << "Found multiple CPU drivers";
                cpuDevice = device;
            }
        }
        ASSERT_NE(fibonacciDevice, nullptr) << "Expecting Fibonacci driver to be available";
        ASSERT_NE(cpuDevice, nullptr) << "Expecting CPU driver to be available";
        mDevices = {fibonacciDevice, cpuDevice};
    }

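    // Frees any execution/compilation created by the test and restores the global
    // device list and extension type registry that SetUp() modified.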
    virtual void TearDown() {
        if (mExecution) {
            ANeuralNetworksExecution_free(mExecution);
        }
        if (mCompilation) {
            ANeuralNetworksCompilation_free(mCompilation);
        }
        DeviceManager::get()->forTest_reInitializeDeviceList();
        TypeManager::get()->forTest_reset();
    }

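    // Checks which of mModel's operations the devices in mDevices claim to support and
    // compares the result against expected, one entry per operation in model order.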
    void checkSupportedOperations(const std::vector<bool>& expected) {
        const uint32_t kMaxNumberOperations = 256;
        EXPECT_LE(expected.size(), kMaxNumberOperations);
        bool supported[kMaxNumberOperations] = {false};
        EXPECT_EQ(ANeuralNetworksModel_getSupportedOperationsForDevices(
                          mModel.getHandle(), mDevices.data(), mDevices.size(), supported),
                  ANEURALNETWORKS_NO_ERROR);
        for (size_t i = 0; i < expected.size(); ++i) {
            SCOPED_TRACE(::testing::Message() << "i = " << i);
            EXPECT_EQ(supported[i], expected[i]);
        }
    }

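    // Compiles mModel for mDevices and creates an execution, storing the handles in
    // mCompilation and mExecution for the test body (freed in TearDown()).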
    void prepareForExecution() {
        ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                              mDevices.size(), &mCompilation),
                  ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_NO_ERROR);
        ASSERT_EQ(ANeuralNetworksExecution_create(mCompilation, &mExecution),
                  ANEURALNETWORKS_NO_ERROR);
    }

    std::vector<ANeuralNetworksDevice*> mDevices;
    ANeuralNetworksExecution* mExecution = nullptr;
    ANeuralNetworksCompilation* mCompilation = nullptr;
    ExtensionModel mModel;
};

void addNopOperation(ExtensionModel* model, ExtensionOperandType inputType, uint32_t input,
                     uint32_t output) {
    // Our NOP operation is ADD, which has no extension type support.
    ASSERT_EQ(inputType.operandType.type, ANEURALNETWORKS_TENSOR_FLOAT32);
    ASSERT_EQ(inputType.dimensions.size(), 1u);

    uint32_t inputZeros = model->addOperand(&inputType);
    uint32_t inputSize = inputType.dimensions[0];
    uint32_t inputLength = sizeof(float) * inputSize;
    const float kZeros[100] = {};
    ASSERT_GE(sizeof(kZeros), inputLength);
    model->setOperandValue(inputZeros, &kZeros, inputLength);

    ExtensionOperandType scalarType(Type::INT32, {});
    uint32_t activation = model->addOperand(&scalarType);
    int32_t kNoActivation = ANEURALNETWORKS_FUSED_NONE;
    model->setOperandValue(activation, &kNoActivation, sizeof(kNoActivation));

    model->addOperation(ANEURALNETWORKS_ADD, {input, inputZeros, activation}, {output});
}

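// Builds a model with a single TEST_VENDOR_FIBONACCI operation. When addNopOperations is
// true, the Fibonacci input and output are wrapped in NOP ADD operations, so they become
// temporary operands rather than model inputs/outputs.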
void createModel(ExtensionModel* model, ExtensionOperandType inputType,
                 ExtensionOperandType outputType, bool addNopOperations) {
    uint32_t fibonacciInput = model->addOperand(&inputType);
    uint32_t fibonacciOutput = model->addOperand(&outputType);

    uint32_t modelInput = addNopOperations ? model->addOperand(&inputType) : fibonacciInput;
    uint32_t modelOutput = addNopOperations ? model->addOperand(&outputType) : fibonacciOutput;

    if (addNopOperations) {
        addNopOperation(model, inputType, modelInput, fibonacciInput);
    }
    model->addOperation(model->getExtensionOperationType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                                                         TEST_VENDOR_FIBONACCI),
                        {fibonacciInput}, {fibonacciOutput});
    if (addNopOperations) {
        addNopOperation(model, outputType, fibonacciOutput, modelOutput);
    }

    model->identifyInputsAndOutputs({modelInput}, {modelOutput});
    model->finish();
    ASSERT_TRUE(model->isValid());
}

TEST_F(FibonacciExtensionTest, ModelWithExtensionOperandTypes) {
    constexpr uint32_t N = 10;
    constexpr double scale = 0.5;
    constexpr int64_t zeroPoint = 10;

    ExtensionOperandType inputType(
            static_cast<Type>(mModel.getExtensionOperandType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                                                             TEST_VENDOR_INT64)),
            {});
    ExtensionOperandType outputType(
            static_cast<Type>(mModel.getExtensionOperandType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                                                             TEST_VENDOR_TENSOR_QUANT64_ASYMM)),
            {N},
            ExtensionOperandParams(TestVendorQuant64AsymmParams{
                    .scale = scale,
                    .zeroPoint = zeroPoint,
            }));
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({true});
    prepareForExecution();

    int64_t input = N;
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    int64_t output[N] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);

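    // The output is the first N Fibonacci numbers, quantized as fib(i + 1) / scale + zeroPoint.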
    EXPECT_EQ(output[0], 1 / scale + zeroPoint);
    EXPECT_EQ(output[1], 1 / scale + zeroPoint);
    EXPECT_EQ(output[2], 2 / scale + zeroPoint);
    EXPECT_EQ(output[3], 3 / scale + zeroPoint);
    EXPECT_EQ(output[4], 5 / scale + zeroPoint);
    EXPECT_EQ(output[5], 8 / scale + zeroPoint);
    EXPECT_EQ(output[6], 13 / scale + zeroPoint);
    EXPECT_EQ(output[7], 21 / scale + zeroPoint);
    EXPECT_EQ(output[8], 34 / scale + zeroPoint);
    EXPECT_EQ(output[9], 55 / scale + zeroPoint);
}

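// With addNopOperations=true, the Fibonacci operation's input and output are temporary
// operands (produced and consumed by the NOP ADD operations) rather than model I/O.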
TEST_F(FibonacciExtensionTest, ModelWithTemporaries) {
    constexpr uint32_t N = 10;

    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {N});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/true);
    checkSupportedOperations({true, true, true});
    prepareForExecution();

    float input[] = {N};
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    float output[N] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_NO_ERROR);

    EXPECT_EQ(output[0], 1);
    EXPECT_EQ(output[1], 1);
    EXPECT_EQ(output[2], 2);
    EXPECT_EQ(output[3], 3);
    EXPECT_EQ(output[4], 5);
    EXPECT_EQ(output[5], 8);
    EXPECT_EQ(output[6], 13);
    EXPECT_EQ(output[7], 21);
    EXPECT_EQ(output[8], 34);
    EXPECT_EQ(output[9], 55);
}

TEST_F(FibonacciExtensionTest, InvalidInputType) {
    ExtensionOperandType inputType(Type::TENSOR_INT32, {1});  // Unsupported type.
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({false});  // The driver reports that it doesn't support the operation.
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidOutputType) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_INT32, {1});  // Unsupported type.
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({false});  // The driver reports that it doesn't support the operation.
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidInputValue) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    createModel(&mModel, inputType, outputType, /*addNopOperations=*/false);
    checkSupportedOperations({true});
    prepareForExecution();

    float input[] = {-1};  // Invalid input value.
    EXPECT_EQ(ANeuralNetworksExecution_setInput(mExecution, 0, nullptr, &input, sizeof(input)),
              ANEURALNETWORKS_NO_ERROR);

    float output[1] = {};
    EXPECT_EQ(ANeuralNetworksExecution_setOutput(mExecution, 0, nullptr, &output, sizeof(output)),
              ANEURALNETWORKS_NO_ERROR);

    ASSERT_EQ(ANeuralNetworksExecution_compute(mExecution), ANEURALNETWORKS_OP_FAILED);
}

TEST_F(FibonacciExtensionTest, InvalidNumInputs) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    uint32_t input1 = mModel.addOperand(&inputType);
    uint32_t input2 = mModel.addOperand(&inputType);  // Extra input.
    uint32_t output = mModel.addOperand(&outputType);
    mModel.addOperation(mModel.getExtensionOperationType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                                                         TEST_VENDOR_FIBONACCI),
                        {input1, input2}, {output});
    mModel.identifyInputsAndOutputs({input1, input2}, {output});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidNumOutputs) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    uint32_t input = mModel.addOperand(&inputType);
    uint32_t output1 = mModel.addOperand(&outputType);
    uint32_t output2 = mModel.addOperand(&outputType);  // Extra output.
    mModel.addOperation(mModel.getExtensionOperationType(TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                                                         TEST_VENDOR_FIBONACCI),
                        {input}, {output1, output2});
    mModel.identifyInputsAndOutputs({input}, {output1, output2});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

TEST_F(FibonacciExtensionTest, InvalidOperation) {
    ExtensionOperandType inputType(Type::TENSOR_FLOAT32, {1});
    ExtensionOperandType outputType(Type::TENSOR_FLOAT32, {1});
    uint32_t input = mModel.addOperand(&inputType);
    uint32_t output = mModel.addOperand(&outputType);
    mModel.addOperation(mModel.getExtensionOperationType(
                                TEST_VENDOR_FIBONACCI_EXTENSION_NAME,
                                TEST_VENDOR_FIBONACCI + 1),  // This operation should not exist.
                        {input}, {output});
    mModel.identifyInputsAndOutputs({input}, {output});
    mModel.finish();
    ASSERT_TRUE(mModel.isValid());
    checkSupportedOperations({false});
    ASSERT_EQ(ANeuralNetworksCompilation_createForDevices(mModel.getHandle(), mDevices.data(),
                                                          mDevices.size(), &mCompilation),
              ANEURALNETWORKS_NO_ERROR);
    ASSERT_EQ(ANeuralNetworksCompilation_finish(mCompilation), ANEURALNETWORKS_BAD_DATA);
}

}  // namespace
}  // namespace nn
}  // namespace android