• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "Device.h"
18 
19 #include "Buffer.h"
20 #include "PreparedModel.h"
21 
22 #include <android-base/logging.h>
23 #include <android/hardware/neuralnetworks/1.0/IPreparedModelCallback.h>
24 #include <android/hardware/neuralnetworks/1.0/types.h>
25 #include <android/hardware/neuralnetworks/1.1/types.h>
26 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
27 #include <android/hardware/neuralnetworks/1.2/types.h>
28 #include <android/hardware/neuralnetworks/1.3/IDevice.h>
29 #include <android/hardware/neuralnetworks/1.3/IPreparedModelCallback.h>
30 #include <android/hardware/neuralnetworks/1.3/types.h>
31 #include <nnapi/IBuffer.h>
32 #include <nnapi/IDevice.h>
33 #include <nnapi/IPreparedModel.h>
34 #include <nnapi/Result.h>
35 #include <nnapi/TypeUtils.h>
36 #include <nnapi/Types.h>
37 #include <nnapi/hal/1.0/Conversions.h>
38 #include <nnapi/hal/1.0/Utils.h>
39 #include <nnapi/hal/1.1/Conversions.h>
40 #include <nnapi/hal/1.1/Utils.h>
41 #include <nnapi/hal/1.2/Conversions.h>
42 #include <nnapi/hal/1.2/Utils.h>
43 #include <nnapi/hal/1.3/Conversions.h>
44 #include <nnapi/hal/1.3/Utils.h>
45 
46 #include <memory>
47 
48 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
49 // lifetimes across processes and for protecting asynchronous calls across HIDL.
50 
51 namespace android::hardware::neuralnetworks::adapter {
52 namespace {
53 
// Converts a HIDL object to its canonical NN representation. Because the
// object originated from a client argument, any conversion failure is
// remapped to INVALID_ARGUMENT before being returned.
template <typename Type>
auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {
    auto result = nn::convert(object);
    if (!result.has_value()) {
        // A malformed client argument is always reported as invalid-argument.
        result.error().code = nn::ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}
62 
// Result type produced by the canonical IDevice::prepareModel* calls.
using PrepareModelResult = nn::GeneralResult<nn::SharedPreparedModel>;
64 
adaptPreparedModel(nn::SharedPreparedModel preparedModel)65 sp<PreparedModel> adaptPreparedModel(nn::SharedPreparedModel preparedModel) {
66     if (preparedModel == nullptr) {
67         return nullptr;
68     }
69     return sp<PreparedModel>::make(std::move(preparedModel));
70 }
71 
notify(V1_0::IPreparedModelCallback * callback,nn::ErrorStatus status,const sp<PreparedModel> & hidlPreparedModel)72 void notify(V1_0::IPreparedModelCallback* callback, nn::ErrorStatus status,
73             const sp<PreparedModel>& hidlPreparedModel) {
74     if (callback != nullptr) {
75         const auto hidlStatus = V1_0::utils::convert(status).value();
76         const auto ret = callback->notify(hidlStatus, hidlPreparedModel);
77         if (!ret.isOk()) {
78             LOG(ERROR) << "V1_0::IPreparedModelCallback::notify failed with " << ret.description();
79         }
80     }
81 }
82 
notify(V1_2::IPreparedModelCallback * callback,nn::ErrorStatus status,const sp<PreparedModel> & hidlPreparedModel)83 void notify(V1_2::IPreparedModelCallback* callback, nn::ErrorStatus status,
84             const sp<PreparedModel>& hidlPreparedModel) {
85     if (callback != nullptr) {
86         const auto hidlStatus = V1_2::utils::convert(status).value();
87         const auto ret = callback->notify_1_2(hidlStatus, hidlPreparedModel);
88         if (!ret.isOk()) {
89             LOG(ERROR) << "V1_2::IPreparedModelCallback::notify_1_2 failed with "
90                        << ret.description();
91         }
92     }
93 }
94 
notify(V1_3::IPreparedModelCallback * callback,nn::ErrorStatus status,const sp<PreparedModel> & hidlPreparedModel)95 void notify(V1_3::IPreparedModelCallback* callback, nn::ErrorStatus status,
96             const sp<PreparedModel>& hidlPreparedModel) {
97     if (callback != nullptr) {
98         const auto hidlStatus = V1_3::utils::convert(status).value();
99         const auto ret = callback->notify_1_3(hidlStatus, hidlPreparedModel);
100         if (!ret.isOk()) {
101             LOG(ERROR) << "V1_3::IPreparedModelCallback::notify_1_3 failed with "
102                        << ret.description();
103         }
104     }
105 }
106 
107 template <typename CallbackType>
notify(CallbackType * callback,PrepareModelResult result)108 void notify(CallbackType* callback, PrepareModelResult result) {
109     if (!result.has_value()) {
110         const auto [message, status] = std::move(result).error();
111         LOG(ERROR) << message;
112         notify(callback, status, nullptr);
113     } else {
114         auto preparedModel = std::move(result).value();
115         auto hidlPreparedModel = adaptPreparedModel(std::move(preparedModel));
116         notify(callback, nn::ErrorStatus::NONE, std::move(hidlPreparedModel));
117     }
118 }
119 
// Converts the HIDL model to canonical form and queries the underlying device
// for the per-operation support vector.
template <typename ModelType>
nn::GeneralResult<hidl_vec<bool>> getSupportedOperations(const nn::SharedDevice& device,
                                                         const ModelType& model) {
    const auto nnModel = NN_TRY(convertInput(model));
    return NN_TRY(device->getSupportedOperations(nnModel));
}
126 
// Asynchronously prepares a V1_0 model with default execution preference and
// priority. Returns an error only for a null callback or a model that fails
// conversion; all other outcomes are reported through the callback.
nn::GeneralResult<void> prepareModel(const nn::SharedDevice& device, const Executor& executor,
                                     const V1_0::Model& model,
                                     const sp<V1_0::IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    auto nnModel = NN_TRY(convertInput(model));

    // The preparation itself runs on the executor; the result reaches the
    // client via the callback, not this function's return value.
    Task task = [device, nnModel = std::move(nnModel), callback] {
        auto result = device->prepareModel(nnModel, nn::ExecutionPreference::DEFAULT,
                                           nn::Priority::DEFAULT, {}, {}, {}, {}, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), {});

    return {};
}
145 
// Asynchronously prepares a V1_1 model with the given execution preference
// and default priority. Returns an error only for a null callback or failed
// argument conversion; all other outcomes are reported through the callback.
nn::GeneralResult<void> prepareModel_1_1(const nn::SharedDevice& device, const Executor& executor,
                                         const V1_1::Model& model,
                                         V1_1::ExecutionPreference preference,
                                         const sp<V1_0::IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    auto nnModel = NN_TRY(convertInput(model));
    const auto nnPreference = NN_TRY(convertInput(preference));

    // The preparation itself runs on the executor; the result reaches the
    // client via the callback.
    Task task = [device, nnModel = std::move(nnModel), nnPreference, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {}, {}, {},
                                           {}, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), {});

    return {};
}
166 
// Asynchronously prepares a V1_2 model, forwarding the compilation cache
// handles and cache token. Returns an error only for a null callback or
// failed argument conversion; all other outcomes go through the callback.
nn::GeneralResult<void> prepareModel_1_2(const nn::SharedDevice& device, const Executor& executor,
                                         const V1_2::Model& model,
                                         V1_1::ExecutionPreference preference,
                                         const hidl_vec<hidl_handle>& modelCache,
                                         const hidl_vec<hidl_handle>& dataCache,
                                         const CacheToken& token,
                                         const sp<V1_2::IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    auto nnModel = NN_TRY(convertInput(model));
    const auto nnPreference = NN_TRY(convertInput(preference));
    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    // The preparation itself runs on the executor; the result reaches the
    // client via the callback.
    Task task = [device, nnModel = std::move(nnModel), nnPreference,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nn::Priority::DEFAULT, {},
                                           nnModelCache, nnDataCache, nnToken, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), {});

    return {};
}
195 
// Asynchronously prepares a V1_3 model with explicit priority and optional
// deadline, forwarding the compilation cache handles and cache token. Returns
// an error only for a null callback or failed argument conversion; all other
// outcomes are reported through the callback.
nn::GeneralResult<void> prepareModel_1_3(
        const nn::SharedDevice& device, const Executor& executor, const V1_3::Model& model,
        V1_1::ExecutionPreference preference, V1_3::Priority priority,
        const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
        const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
        const sp<V1_3::IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    auto nnModel = NN_TRY(convertInput(model));
    const auto nnPreference = NN_TRY(convertInput(preference));
    const auto nnPriority = NN_TRY(convertInput(priority));
    const auto nnDeadline = NN_TRY(convertInput(deadline));
    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    // The deadline is also handed to the executor so scheduling can respect it.
    Task task = [device, nnModel = std::move(nnModel), nnPreference, nnPriority, nnDeadline,
                 nnModelCache = std::move(nnModelCache), nnDataCache = std::move(nnDataCache),
                 nnToken, callback] {
        auto result = device->prepareModel(nnModel, nnPreference, nnPriority, nnDeadline,
                                           nnModelCache, nnDataCache, nnToken, {}, {});
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), nnDeadline);

    return {};
}
225 
// Asynchronously prepares a model from previously written cache files. Returns
// an error only for a null callback or failed argument conversion; all other
// outcomes are reported through the callback.
nn::GeneralResult<void> prepareModelFromCache(const nn::SharedDevice& device,
                                              const Executor& executor,
                                              const hidl_vec<hidl_handle>& modelCache,
                                              const hidl_vec<hidl_handle>& dataCache,
                                              const CacheToken& token,
                                              const sp<V1_2::IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    // The preparation itself runs on the executor; the result reaches the
    // client via the callback.
    Task task = [device, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
        auto result = device->prepareModelFromCache({}, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), {});

    return {};
}
249 
// Asynchronously prepares a model from previously written cache files,
// honoring the optional deadline. Returns an error only for a null callback
// or failed argument conversion; all other outcomes are reported through the
// callback.
nn::GeneralResult<void> prepareModelFromCache_1_3(
        const nn::SharedDevice& device, const Executor& executor,
        const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
        const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
        const sp<V1_3::IPreparedModelCallback>& callback) {
    if (callback.get() == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Invalid callback";
    }

    const auto nnDeadline = NN_TRY(convertInput(deadline));
    auto nnModelCache = NN_TRY(convertInput(modelCache));
    auto nnDataCache = NN_TRY(convertInput(dataCache));
    const auto nnToken = nn::CacheToken(token);

    // Use the explicit Task type for consistency with every other prepare*
    // helper in this file (this one previously used `auto`). The deadline is
    // also handed to the executor so scheduling can respect it.
    Task task = [device, nnDeadline, nnModelCache = std::move(nnModelCache),
                 nnDataCache = std::move(nnDataCache), nnToken, callback] {
        auto result = device->prepareModelFromCache(nnDeadline, nnModelCache, nnDataCache, nnToken);
        notify(callback.get(), std::move(result));
    };
    executor(std::move(task), nnDeadline);

    return {};
}
273 
// Unwraps an adapter IPreparedModel back to its underlying canonical prepared
// model. Fails with INVALID_ARGUMENT if the handle is null or refers to an
// object in another process (which cannot be downcast).
nn::GeneralResult<nn::SharedPreparedModel> downcast(const sp<V1_3::IPreparedModel>& preparedModel) {
    if (preparedModel == nullptr) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "preparedModel is nullptr";
    }
    if (preparedModel->isRemote()) {
        return NN_ERROR(nn::ErrorStatus::INVALID_ARGUMENT) << "Cannot convert remote models";
    }

    // This static_cast is safe because adapter::PreparedModel is the only class that implements
    // the IPreparedModel interface in the adapter service code.
    const auto* casted = static_cast<const PreparedModel*>(preparedModel.get());
    return casted->getUnderlyingPreparedModel();
}
287 
// Downcasts every element of the vector, failing on the first element that
// is null or remote.
nn::GeneralResult<std::vector<nn::SharedPreparedModel>> downcastAll(
        const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels) {
    std::vector<nn::SharedPreparedModel> canonical;
    canonical.reserve(preparedModels.size());
    for (const auto& preparedModel : preparedModels) {
        canonical.push_back(NN_TRY(downcast(preparedModel)));
    }
    return canonical;
}
297 
// Converts the allocation arguments to canonical form, allocates a buffer on
// the underlying device, and returns it wrapped in a HIDL Buffer adapter
// together with the buffer's memory-domain token.
nn::GeneralResult<std::pair<sp<V1_3::IBuffer>, uint32_t>> allocate(
        const nn::SharedDevice& device, const V1_3::BufferDesc& desc,
        const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
        const hidl_vec<V1_3::BufferRole>& inputRoles,
        const hidl_vec<V1_3::BufferRole>& outputRoles) {
    auto nnDesc = NN_TRY(convertInput(desc));
    auto nnPreparedModels = NN_TRY(downcastAll(preparedModels));
    auto nnInputRoles = NN_TRY(convertInput(inputRoles));
    auto nnOutputRoles = NN_TRY(convertInput(outputRoles));

    auto buffer = NN_TRY(device->allocate(nnDesc, nnPreparedModels, nnInputRoles, nnOutputRoles));

    // Fetch the token before moving the buffer into the adapter wrapper.
    const nn::Request::MemoryDomainToken token = buffer->getToken();
    auto hidlBuffer = sp<Buffer>::make(std::move(buffer));
    return std::make_pair(std::move(hidlBuffer), static_cast<uint32_t>(token));
}
314 
315 }  // namespace
316 
// Takes ownership of the canonical device and the executor used for
// asynchronous work; both must be non-null.
Device::Device(nn::SharedDevice device, Executor executor)
    : kDevice(std::move(device)), kExecutor(std::move(executor)) {
    CHECK(kDevice != nullptr);
    CHECK(kExecutor != nullptr);
}
322 
getCapabilities(getCapabilities_cb cb)323 Return<void> Device::getCapabilities(getCapabilities_cb cb) {
324     const auto capabilities = V1_0::utils::convert(kDevice->getCapabilities()).value();
325     cb(V1_0::ErrorStatus::NONE, capabilities);
326     return Void();
327 }
328 
getCapabilities_1_1(getCapabilities_1_1_cb cb)329 Return<void> Device::getCapabilities_1_1(getCapabilities_1_1_cb cb) {
330     const auto capabilities = V1_1::utils::convert(kDevice->getCapabilities()).value();
331     cb(V1_0::ErrorStatus::NONE, capabilities);
332     return Void();
333 }
334 
getCapabilities_1_2(getCapabilities_1_2_cb cb)335 Return<void> Device::getCapabilities_1_2(getCapabilities_1_2_cb cb) {
336     const auto capabilities = V1_2::utils::convert(kDevice->getCapabilities()).value();
337     cb(V1_0::ErrorStatus::NONE, capabilities);
338     return Void();
339 }
340 
getCapabilities_1_3(getCapabilities_1_3_cb cb)341 Return<void> Device::getCapabilities_1_3(getCapabilities_1_3_cb cb) {
342     const auto capabilities = V1_3::utils::convert(kDevice->getCapabilities()).value();
343     cb(V1_3::ErrorStatus::NONE, capabilities);
344     return Void();
345 }
346 
// Reports the driver's version string; this query cannot fail.
Return<void> Device::getVersionString(getVersionString_cb cb) {
    cb(V1_0::ErrorStatus::NONE, kDevice->getVersionString());
    return Void();
}
351 
getType(getType_cb cb)352 Return<void> Device::getType(getType_cb cb) {
353     const auto maybeDeviceType = V1_2::utils::convert(kDevice->getType());
354     if (!maybeDeviceType.has_value()) {
355         const auto& [message, code] = maybeDeviceType.error();
356         LOG(ERROR) << "adapter::Device::getType failed with " << code << ": " << message;
357         cb(V1_2::utils::convert(code).value(), {});
358     } else {
359         cb(V1_0::ErrorStatus::NONE, maybeDeviceType.value());
360     }
361     return Void();
362 }
363 
getSupportedExtensions(getSupportedExtensions_cb cb)364 Return<void> Device::getSupportedExtensions(getSupportedExtensions_cb cb) {
365     const auto maybeSupportedExtensions = V1_2::utils::convert(kDevice->getSupportedExtensions());
366     if (!maybeSupportedExtensions.has_value()) {
367         const auto& [message, code] = maybeSupportedExtensions.error();
368         LOG(ERROR) << "adapter::Device::getSupportedExtensions failed with " << code << ": "
369                    << message;
370         cb(V1_2::utils::convert(code).value(), {});
371     } else {
372         cb(V1_0::ErrorStatus::NONE, maybeSupportedExtensions.value());
373     }
374     return Void();
375 }
376 
getSupportedOperations(const V1_0::Model & model,getSupportedOperations_cb cb)377 Return<void> Device::getSupportedOperations(const V1_0::Model& model,
378                                             getSupportedOperations_cb cb) {
379     const auto result = adapter::getSupportedOperations(kDevice, model);
380     if (!result.has_value()) {
381         const auto& [message, code] = result.error();
382         LOG(ERROR) << "adapter::Device::getSupportedOperations_1_0 failed with " << code << ": "
383                    << message;
384         cb(V1_0::utils::convert(code).value(), {});
385     } else {
386         cb(V1_0::ErrorStatus::NONE, result.value());
387     }
388     return Void();
389 }
390 
getSupportedOperations_1_1(const V1_1::Model & model,getSupportedOperations_1_1_cb cb)391 Return<void> Device::getSupportedOperations_1_1(const V1_1::Model& model,
392                                                 getSupportedOperations_1_1_cb cb) {
393     const auto result = adapter::getSupportedOperations(kDevice, model);
394     if (!result.has_value()) {
395         const auto& [message, code] = result.error();
396         LOG(ERROR) << "adapter::Device::getSupportedOperations_1_1 failed with " << code << ": "
397                    << message;
398         cb(V1_1::utils::convert(code).value(), {});
399     } else {
400         cb(V1_0::ErrorStatus::NONE, result.value());
401     }
402     return Void();
403 }
404 
getSupportedOperations_1_2(const V1_2::Model & model,getSupportedOperations_1_2_cb cb)405 Return<void> Device::getSupportedOperations_1_2(const V1_2::Model& model,
406                                                 getSupportedOperations_1_2_cb cb) {
407     const auto result = adapter::getSupportedOperations(kDevice, model);
408     if (!result.has_value()) {
409         const auto& [message, code] = result.error();
410         LOG(ERROR) << "adapter::Device::getSupportedOperations_1_2 failed with " << code << ": "
411                    << message;
412         cb(V1_2::utils::convert(code).value(), {});
413     } else {
414         cb(V1_0::ErrorStatus::NONE, result.value());
415     }
416     return Void();
417 }
418 
getSupportedOperations_1_3(const V1_3::Model & model,getSupportedOperations_1_3_cb cb)419 Return<void> Device::getSupportedOperations_1_3(const V1_3::Model& model,
420                                                 getSupportedOperations_1_3_cb cb) {
421     const auto result = adapter::getSupportedOperations(kDevice, model);
422     if (!result.has_value()) {
423         const auto& [message, code] = result.error();
424         LOG(ERROR) << "adapter::Device::getSupportedOperations_1_3 failed with " << code << ": "
425                    << message;
426         cb(V1_3::utils::convert(code).value(), {});
427     } else {
428         cb(V1_3::ErrorStatus::NONE, result.value());
429     }
430     return Void();
431 }
432 
// Reports how many model-cache and data-cache files the driver needs for
// compilation caching; this query cannot fail.
Return<void> Device::getNumberOfCacheFilesNeeded(getNumberOfCacheFilesNeeded_cb cb) {
    const auto [numModelCache, numDataCache] = kDevice->getNumberOfCacheFilesNeeded();
    cb(V1_0::ErrorStatus::NONE, numModelCache, numDataCache);
    return Void();
}
438 
prepareModel(const V1_0::Model & model,const sp<V1_0::IPreparedModelCallback> & callback)439 Return<V1_0::ErrorStatus> Device::prepareModel(const V1_0::Model& model,
440                                                const sp<V1_0::IPreparedModelCallback>& callback) {
441     auto result = adapter::prepareModel(kDevice, kExecutor, model, callback);
442     if (!result.has_value()) {
443         auto [message, code] = std::move(result).error();
444         LOG(ERROR) << "adapter::Device::prepareModel failed with " << code << ": " << message;
445         notify(callback.get(), code, nullptr);
446         return V1_0::utils::convert(code).value();
447     }
448     return V1_0::ErrorStatus::NONE;
449 }
450 
prepareModel_1_1(const V1_1::Model & model,V1_1::ExecutionPreference preference,const sp<V1_0::IPreparedModelCallback> & callback)451 Return<V1_0::ErrorStatus> Device::prepareModel_1_1(
452         const V1_1::Model& model, V1_1::ExecutionPreference preference,
453         const sp<V1_0::IPreparedModelCallback>& callback) {
454     auto result = adapter::prepareModel_1_1(kDevice, kExecutor, model, preference, callback);
455     if (!result.has_value()) {
456         auto [message, code] = std::move(result).error();
457         LOG(ERROR) << "adapter::Device::prepareModel_1_1 failed with " << code << ": " << message;
458         notify(callback.get(), code, nullptr);
459         return V1_1::utils::convert(code).value();
460     }
461     return V1_0::ErrorStatus::NONE;
462 }
463 
prepareModel_1_2(const V1_2::Model & model,V1_1::ExecutionPreference preference,const hidl_vec<hidl_handle> & modelCache,const hidl_vec<hidl_handle> & dataCache,const CacheToken & token,const sp<V1_2::IPreparedModelCallback> & callback)464 Return<V1_0::ErrorStatus> Device::prepareModel_1_2(
465         const V1_2::Model& model, V1_1::ExecutionPreference preference,
466         const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
467         const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
468     auto result = adapter::prepareModel_1_2(kDevice, kExecutor, model, preference, modelCache,
469                                             dataCache, token, callback);
470     if (!result.has_value()) {
471         auto [message, code] = std::move(result).error();
472         LOG(ERROR) << "adapter::Device::prepareModel_1_2 failed with " << code << ": " << message;
473         notify(callback.get(), code, nullptr);
474         return V1_2::utils::convert(code).value();
475     }
476     return V1_0::ErrorStatus::NONE;
477 }
478 
prepareModel_1_3(const V1_3::Model & model,V1_1::ExecutionPreference preference,V1_3::Priority priority,const V1_3::OptionalTimePoint & deadline,const hidl_vec<hidl_handle> & modelCache,const hidl_vec<hidl_handle> & dataCache,const CacheToken & token,const sp<V1_3::IPreparedModelCallback> & callback)479 Return<V1_3::ErrorStatus> Device::prepareModel_1_3(
480         const V1_3::Model& model, V1_1::ExecutionPreference preference, V1_3::Priority priority,
481         const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
482         const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
483         const sp<V1_3::IPreparedModelCallback>& callback) {
484     auto result = adapter::prepareModel_1_3(kDevice, kExecutor, model, preference, priority,
485                                             deadline, modelCache, dataCache, token, callback);
486     if (!result.has_value()) {
487         auto [message, code] = std::move(result).error();
488         LOG(ERROR) << "adapter::Device::prepareModel_1_3 failed with " << code << ": " << message;
489         notify(callback.get(), code, nullptr);
490         return V1_3::utils::convert(code).value();
491     }
492     return V1_3::ErrorStatus::NONE;
493 }
494 
prepareModelFromCache(const hidl_vec<hidl_handle> & modelCache,const hidl_vec<hidl_handle> & dataCache,const CacheToken & token,const sp<V1_2::IPreparedModelCallback> & callback)495 Return<V1_0::ErrorStatus> Device::prepareModelFromCache(
496         const hidl_vec<hidl_handle>& modelCache, const hidl_vec<hidl_handle>& dataCache,
497         const CacheToken& token, const sp<V1_2::IPreparedModelCallback>& callback) {
498     auto result = adapter::prepareModelFromCache(kDevice, kExecutor, modelCache, dataCache, token,
499                                                  callback);
500     if (!result.has_value()) {
501         auto [message, code] = std::move(result).error();
502         LOG(ERROR) << "adapter::Device::prepareModelFromCache failed with " << code << ": "
503                    << message;
504         notify(callback.get(), code, nullptr);
505         return V1_2::utils::convert(code).value();
506     }
507     return V1_0::ErrorStatus::NONE;
508 }
509 
prepareModelFromCache_1_3(const V1_3::OptionalTimePoint & deadline,const hidl_vec<hidl_handle> & modelCache,const hidl_vec<hidl_handle> & dataCache,const CacheToken & token,const sp<V1_3::IPreparedModelCallback> & callback)510 Return<V1_3::ErrorStatus> Device::prepareModelFromCache_1_3(
511         const V1_3::OptionalTimePoint& deadline, const hidl_vec<hidl_handle>& modelCache,
512         const hidl_vec<hidl_handle>& dataCache, const CacheToken& token,
513         const sp<V1_3::IPreparedModelCallback>& callback) {
514     auto result = adapter::prepareModelFromCache_1_3(kDevice, kExecutor, deadline, modelCache,
515                                                      dataCache, token, callback);
516     if (!result.has_value()) {
517         auto [message, code] = std::move(result).error();
518         LOG(ERROR) << "adapter::Device::prepareModelFromCache_1_3 failed with " << code << ": "
519                    << message;
520         notify(callback.get(), code, nullptr);
521         return V1_3::utils::convert(code).value();
522     }
523     return V1_3::ErrorStatus::NONE;
524 }
525 
// The adapter always reports the device as available.
Return<V1_0::DeviceStatus> Device::getStatus() {
    return V1_0::DeviceStatus::AVAILABLE;
}
529 
// Allocates a driver-managed buffer for the given buffer roles. On failure,
// the callback receives the error status with a null buffer and token 0.
Return<void> Device::allocate(const V1_3::BufferDesc& desc,
                              const hidl_vec<sp<V1_3::IPreparedModel>>& preparedModels,
                              const hidl_vec<V1_3::BufferRole>& inputRoles,
                              const hidl_vec<V1_3::BufferRole>& outputRoles, allocate_cb cb) {
    auto result = adapter::allocate(kDevice, desc, preparedModels, inputRoles, outputRoles);
    if (!result.has_value()) {
        const auto [message, code] = std::move(result).error();
        LOG(ERROR) << "adapter::Device::allocate failed with " << code << ": " << message;
        cb(V1_3::utils::convert(code).value(), nullptr, /*token=*/0);
        return Void();
    }
    auto [buffer, token] = std::move(result).value();
    cb(V1_3::ErrorStatus::NONE, buffer, token);
    return Void();
}
545 
546 }  // namespace android::hardware::neuralnetworks::adapter
547