• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2021 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "neuralnetworks_aidl_hal_test"
18 
19 #include <aidl/android/hardware/neuralnetworks/RequestMemoryPool.h>
20 #include <android/binder_auto_utils.h>
21 #include <variant>
22 
23 #include <chrono>
24 
25 #include <TestHarness.h>
26 #include <nnapi/hal/aidl/Utils.h>
27 
28 #include "Callbacks.h"
29 #include "GeneratedTestHarness.h"
30 #include "Utils.h"
31 #include "VtsHalNeuralnetworks.h"
32 
33 namespace aidl::android::hardware::neuralnetworks::vts::functional {
34 
35 using ExecutionMutation = std::function<void(Request*)>;
36 
37 ///////////////////////// UTILITY FUNCTIONS /////////////////////////
38 
39 // Test request validation with reusable execution.
validateReusableExecution(const std::shared_ptr<IPreparedModel> & preparedModel,const std::string & message,const Request & request,bool measure)40 static void validateReusableExecution(const std::shared_ptr<IPreparedModel>& preparedModel,
41                                       const std::string& message, const Request& request,
42                                       bool measure) {
43     // createReusableExecution
44     std::shared_ptr<IExecution> execution;
45     {
46         SCOPED_TRACE(message + " [createReusableExecution]");
47         const auto createStatus = preparedModel->createReusableExecution(
48                 request, {measure, kOmittedTimeoutDuration, {}, {}}, &execution);
49         if (!createStatus.isOk()) {
50             ASSERT_EQ(createStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
51             ASSERT_EQ(static_cast<ErrorStatus>(createStatus.getServiceSpecificError()),
52                       ErrorStatus::INVALID_ARGUMENT);
53             ASSERT_EQ(nullptr, execution);
54             return;
55         } else {
56             ASSERT_NE(nullptr, execution);
57         }
58     }
59 
60     // synchronous
61     {
62         SCOPED_TRACE(message + " [executeSynchronously]");
63         ExecutionResult executionResult;
64         const auto executeStatus = execution->executeSynchronously(kNoDeadline, &executionResult);
65         ASSERT_FALSE(executeStatus.isOk());
66         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
67         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
68                   ErrorStatus::INVALID_ARGUMENT);
69     }
70 
71     // fenced
72     {
73         SCOPED_TRACE(message + " [executeFenced]");
74         FencedExecutionResult executionResult;
75         const auto executeStatus =
76                 execution->executeFenced({}, kNoDeadline, kNoDuration, &executionResult);
77         ASSERT_FALSE(executeStatus.isOk());
78         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
79         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
80                   ErrorStatus::INVALID_ARGUMENT);
81     }
82 }
83 
84 // Primary validation function. This function will take a valid request, apply a
85 // mutation to it to invalidate the request, then pass it to interface calls
86 // that use the request.
validate(const std::shared_ptr<IPreparedModel> & preparedModel,const std::string & message,const Request & originalRequest,const ExecutionMutation & mutate)87 static void validate(const std::shared_ptr<IPreparedModel>& preparedModel,
88                      const std::string& message, const Request& originalRequest,
89                      const ExecutionMutation& mutate) {
90     Request request = utils::clone(originalRequest).value();
91     mutate(&request);
92 
93     // We'd like to test both with timing requested and without timing
94     // requested. Rather than running each test both ways, we'll decide whether
95     // to request timing by hashing the message. We do not use std::hash because
96     // it is not guaranteed stable across executions.
97     char hash = 0;
98     for (auto c : message) {
99         hash ^= c;
100     };
101     bool measure = (hash & 1);
102 
103     // synchronous
104     {
105         SCOPED_TRACE(message + " [executeSynchronously]");
106         ExecutionResult executionResult;
107         const auto executeStatus = preparedModel->executeSynchronously(
108                 request, measure, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
109         ASSERT_FALSE(executeStatus.isOk());
110         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
111         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
112                   ErrorStatus::INVALID_ARGUMENT);
113     }
114 
115     // fenced
116     {
117         SCOPED_TRACE(message + " [executeFenced]");
118         FencedExecutionResult executionResult;
119         const auto executeStatus = preparedModel->executeFenced(request, {}, false, kNoDeadline,
120                                                                 kOmittedTimeoutDuration,
121                                                                 kNoDuration, &executionResult);
122         ASSERT_FALSE(executeStatus.isOk());
123         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
124         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
125                   ErrorStatus::INVALID_ARGUMENT);
126     }
127 
128     // burst
129     {
130         SCOPED_TRACE(message + " [burst]");
131 
132         // create burst
133         std::shared_ptr<IBurst> burst;
134         auto ret = preparedModel->configureExecutionBurst(&burst);
135         ASSERT_TRUE(ret.isOk()) << ret.getDescription();
136         ASSERT_NE(nullptr, burst.get());
137 
138         // use -1 for all memory identifier tokens
139         const std::vector<int64_t> slots(request.pools.size(), -1);
140 
141         ExecutionResult executionResult;
142         const auto executeStatus = burst->executeSynchronously(
143                 request, slots, measure, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
144         ASSERT_FALSE(executeStatus.isOk());
145         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
146         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
147                   ErrorStatus::INVALID_ARGUMENT);
148     }
149 
150     int32_t aidlVersion;
151     ASSERT_TRUE(preparedModel->getInterfaceVersion(&aidlVersion).isOk());
152     if (aidlVersion < kMinAidlLevelForFL8) {
153         return;
154     }
155 
156     // validate reusable execution
157     validateReusableExecution(preparedModel, message, request, measure);
158 
159     // synchronous with empty hints
160     {
161         SCOPED_TRACE(message + " [executeSynchronouslyWithConfig]");
162         ExecutionResult executionResult;
163         const auto executeStatus = preparedModel->executeSynchronouslyWithConfig(
164                 request, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, &executionResult);
165         ASSERT_FALSE(executeStatus.isOk());
166         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
167         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
168                   ErrorStatus::INVALID_ARGUMENT);
169     }
170 
171     // fenced with empty hints
172     {
173         SCOPED_TRACE(message + " [executeFencedWithConfig]");
174         FencedExecutionResult executionResult;
175         const auto executeStatus = preparedModel->executeFencedWithConfig(
176                 request, {}, {false, kOmittedTimeoutDuration, {}, {}}, kNoDeadline, kNoDuration,
177                 &executionResult);
178         ASSERT_FALSE(executeStatus.isOk());
179         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
180         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
181                   ErrorStatus::INVALID_ARGUMENT);
182     }
183 
184     // burst with empty hints
185     {
186         SCOPED_TRACE(message + " [burst executeSynchronouslyWithConfig]");
187 
188         // create burst
189         std::shared_ptr<IBurst> burst;
190         auto ret = preparedModel->configureExecutionBurst(&burst);
191         ASSERT_TRUE(ret.isOk()) << ret.getDescription();
192         ASSERT_NE(nullptr, burst.get());
193 
194         // use -1 for all memory identifier tokens
195         const std::vector<int64_t> slots(request.pools.size(), -1);
196 
197         ExecutionResult executionResult;
198         const auto executeStatus = burst->executeSynchronouslyWithConfig(
199                 request, slots, {measure, kOmittedTimeoutDuration, {}, {}}, kNoDeadline,
200                 &executionResult);
201         ASSERT_FALSE(executeStatus.isOk());
202         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
203         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
204                   ErrorStatus::INVALID_ARGUMENT);
205     }
206 }
207 
// Convenience wrapper: configures an execution burst on `preparedModel`,
// returning nullptr if the binder call fails.
std::shared_ptr<IBurst> createBurst(const std::shared_ptr<IPreparedModel>& preparedModel) {
    std::shared_ptr<IBurst> burst;
    if (const auto status = preparedModel->configureExecutionBurst(&burst); status.isOk()) {
        return burst;
    }
    return nullptr;
}
214 
215 ///////////////////////// REMOVE INPUT ////////////////////////////////////
216 
removeInputTest(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)217 static void removeInputTest(const std::shared_ptr<IPreparedModel>& preparedModel,
218                             const Request& request) {
219     for (size_t input = 0; input < request.inputs.size(); ++input) {
220         const std::string message = "removeInput: removed input " + std::to_string(input);
221         validate(preparedModel, message, request, [input](Request* request) {
222             request->inputs.erase(request->inputs.begin() + input);
223         });
224     }
225 }
226 
227 ///////////////////////// REMOVE OUTPUT ////////////////////////////////////
228 
removeOutputTest(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)229 static void removeOutputTest(const std::shared_ptr<IPreparedModel>& preparedModel,
230                              const Request& request) {
231     for (size_t output = 0; output < request.outputs.size(); ++output) {
232         const std::string message = "removeOutput: removed Output " + std::to_string(output);
233         validate(preparedModel, message, request, [output](Request* request) {
234             request->outputs.erase(request->outputs.begin() + output);
235         });
236     }
237 }
238 
239 ///////////////////////////// ENTRY POINT //////////////////////////////////
240 
validateRequest(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)241 void validateRequest(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request) {
242     removeInputTest(preparedModel, request);
243     removeOutputTest(preparedModel, request);
244 }
245 
validateBurst(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)246 void validateBurst(const std::shared_ptr<IPreparedModel>& preparedModel, const Request& request) {
247     // create burst
248     std::shared_ptr<IBurst> burst;
249     auto ret = preparedModel->configureExecutionBurst(&burst);
250     ASSERT_TRUE(ret.isOk()) << ret.getDescription();
251     ASSERT_NE(nullptr, burst.get());
252 
253     const auto test = [&burst, &request](const std::vector<int64_t>& slots) {
254         ExecutionResult executionResult;
255         const auto executeStatus =
256                 burst->executeSynchronously(request, slots, /*measure=*/false, kNoDeadline,
257                                             kOmittedTimeoutDuration, &executionResult);
258         ASSERT_FALSE(executeStatus.isOk());
259         ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
260         ASSERT_EQ(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()),
261                   ErrorStatus::INVALID_ARGUMENT);
262     };
263 
264     int64_t currentSlot = 0;
265     std::vector<int64_t> slots;
266     slots.reserve(request.pools.size());
267     for (const auto& pool : request.pools) {
268         if (pool.getTag() == RequestMemoryPool::Tag::pool) {
269             slots.push_back(currentSlot++);
270         } else {
271             slots.push_back(-1);
272         }
273     }
274 
275     constexpr int64_t invalidSlot = -2;
276 
277     // validate failure when invalid memory identifier token value
278     for (size_t i = 0; i < request.pools.size(); ++i) {
279         const int64_t oldSlotValue = slots[i];
280 
281         slots[i] = invalidSlot;
282         test(slots);
283 
284         slots[i] = oldSlotValue;
285     }
286 
287     // validate failure when request.pools.size() != memoryIdentifierTokens.size()
288     if (request.pools.size() > 0) {
289         slots = std::vector<int64_t>(request.pools.size() - 1, -1);
290         test(slots);
291     }
292 
293     // validate failure when request.pools.size() != memoryIdentifierTokens.size()
294     slots = std::vector<int64_t>(request.pools.size() + 1, -1);
295     test(slots);
296 
297     // validate failure when invalid memory identifier token value
298     const auto freeStatus = burst->releaseMemoryResource(invalidSlot);
299     ASSERT_FALSE(freeStatus.isOk());
300     ASSERT_EQ(freeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
301     ASSERT_EQ(static_cast<ErrorStatus>(freeStatus.getServiceSpecificError()),
302               ErrorStatus::INVALID_ARGUMENT);
303 }
304 
validateRequestFailure(const std::shared_ptr<IPreparedModel> & preparedModel,const Request & request)305 void validateRequestFailure(const std::shared_ptr<IPreparedModel>& preparedModel,
306                             const Request& request) {
307     SCOPED_TRACE("Expecting request to fail [executeSynchronously]");
308     ExecutionResult executionResult;
309     const auto executeStatus = preparedModel->executeSynchronously(
310             request, false, kNoDeadline, kOmittedTimeoutDuration, &executionResult);
311 
312     ASSERT_FALSE(executeStatus.isOk());
313     ASSERT_EQ(executeStatus.getExceptionCode(), EX_SERVICE_SPECIFIC);
314     ASSERT_NE(static_cast<ErrorStatus>(executeStatus.getServiceSpecificError()), ErrorStatus::NONE);
315 }
316 
317 }  // namespace aidl::android::hardware::neuralnetworks::vts::functional
318