1 /*
2 * Copyright (C) 2020 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include "Callbacks.h"
18
19 #include "Conversions.h"
20 #include "PreparedModel.h"
21 #include "Utils.h"
22
23 #include <android/hardware/neuralnetworks/1.0/types.h>
24 #include <android/hardware/neuralnetworks/1.2/IExecutionCallback.h>
25 #include <android/hardware/neuralnetworks/1.2/IPreparedModelCallback.h>
26 #include <android/hardware/neuralnetworks/1.2/types.h>
27 #include <nnapi/IPreparedModel.h>
28 #include <nnapi/Result.h>
29 #include <nnapi/Types.h>
30 #include <nnapi/hal/1.0/Callbacks.h>
31 #include <nnapi/hal/1.0/Conversions.h>
32 #include <nnapi/hal/1.0/HandleError.h>
33 #include <nnapi/hal/1.0/PreparedModel.h>
34 #include <nnapi/hal/1.0/ProtectCallback.h>
35 #include <nnapi/hal/CommonUtils.h>
36 #include <nnapi/hal/TransferValue.h>
37
38 #include <utility>
39
40 // See hardware/interfaces/neuralnetworks/utils/README.md for more information on HIDL interface
41 // lifetimes across processes and for protecting asynchronous calls across HIDL.
42
43 namespace android::hardware::neuralnetworks::V1_2::utils {
44 namespace {
45
prepareModelCallback(V1_0::ErrorStatus status,const sp<V1_0::IPreparedModel> & preparedModel)46 nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
47 V1_0::ErrorStatus status, const sp<V1_0::IPreparedModel>& preparedModel) {
48 if (const auto dynamicPreparedModel =
49 V1_2::IPreparedModel::castFrom(preparedModel).withDefault(nullptr)) {
50 return V1_2::utils::prepareModelCallback(status, dynamicPreparedModel);
51 }
52 return V1_0::utils::prepareModelCallback(status, preparedModel);
53 }
54
55 nn::GeneralResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>>
convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape> & outputShapes,const Timing & timing)56 convertExecutionGeneralResultsHelper(const hidl_vec<OutputShape>& outputShapes,
57 const Timing& timing) {
58 return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));
59 }
60
61 } // namespace
62
// Converts a (status, V1_2 prepared model) callback result into a canonical
// nn::SharedPreparedModel. HANDLE_STATUS_HIDL returns early from this function
// with a canonical error when status is not NONE; otherwise the HIDL handle is
// wrapped in a V1_2::utils::PreparedModel adapter (synchronous execution mode).
nn::GeneralResult<nn::SharedPreparedModel> prepareModelCallback(
        V1_0::ErrorStatus status, const sp<IPreparedModel>& preparedModel) {
    HANDLE_STATUS_HIDL(status) << "model preparation failed with " << toString(status);
    return NN_TRY(PreparedModel::create(preparedModel, /*executeSynchronously=*/true));
}
68
executionCallback(V1_0::ErrorStatus status,const hidl_vec<OutputShape> & outputShapes,const Timing & timing)69 nn::ExecutionResult<std::pair<std::vector<nn::OutputShape>, nn::Timing>> executionCallback(
70 V1_0::ErrorStatus status, const hidl_vec<OutputShape>& outputShapes, const Timing& timing) {
71 if (status == V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE) {
72 auto canonicalOutputShapes =
73 nn::convert(outputShapes).value_or(std::vector<nn::OutputShape>{});
74 return NN_ERROR(nn::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, std::move(canonicalOutputShapes))
75 << "execution failed with " << toString(status);
76 }
77 HANDLE_STATUS_HIDL(status) << "execution failed with " << toString(status);
78 return convertExecutionGeneralResultsHelper(outputShapes, timing);
79 }
80
notify(V1_0::ErrorStatus status,const sp<V1_0::IPreparedModel> & preparedModel)81 Return<void> PreparedModelCallback::notify(V1_0::ErrorStatus status,
82 const sp<V1_0::IPreparedModel>& preparedModel) {
83 mData.put(prepareModelCallback(status, preparedModel));
84 return Void();
85 }
86
notify_1_2(V1_0::ErrorStatus status,const sp<IPreparedModel> & preparedModel)87 Return<void> PreparedModelCallback::notify_1_2(V1_0::ErrorStatus status,
88 const sp<IPreparedModel>& preparedModel) {
89 mData.put(prepareModelCallback(status, preparedModel));
90 return Void();
91 }
92
// Unblocks any waiter with a DEAD_OBJECT error when the remote driver process
// dies before delivering a result (invoked by the hwbinder death-recipient
// machinery via ProtectCallback).
void PreparedModelCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
96
// Blocks until a result has been stored by notify/notify_1_2/notifyAsDeadObject
// and moves it out of the transfer cell.
PreparedModelCallback::Data PreparedModelCallback::get() {
    return mData.take();
}
100
101 // ExecutionCallback methods begin here
102
notify(V1_0::ErrorStatus status)103 Return<void> ExecutionCallback::notify(V1_0::ErrorStatus status) {
104 mData.put(V1_0::utils::executionCallback(status));
105 return Void();
106 }
107
notify_1_2(V1_0::ErrorStatus status,const hidl_vec<OutputShape> & outputShapes,const Timing & timing)108 Return<void> ExecutionCallback::notify_1_2(V1_0::ErrorStatus status,
109 const hidl_vec<OutputShape>& outputShapes,
110 const Timing& timing) {
111 mData.put(executionCallback(status, outputShapes, timing));
112 return Void();
113 }
114
// Unblocks any waiter with a DEAD_OBJECT error when the remote driver process
// dies before delivering a result (invoked by the hwbinder death-recipient
// machinery via ProtectCallback).
void ExecutionCallback::notifyAsDeadObject() {
    mData.put(NN_ERROR(nn::ErrorStatus::DEAD_OBJECT) << "Dead object");
}
118
// Blocks until a result has been stored by notify/notify_1_2/notifyAsDeadObject
// and moves it out of the transfer cell.
ExecutionCallback::Data ExecutionCallback::get() {
    return mData.take();
}
122
123 } // namespace android::hardware::neuralnetworks::V1_2::utils
124