/hardware/interfaces/neuralnetworks/1.3/utils/include/nnapi/hal/1.3/ |
D | Conversions.h |
    51  GeneralResult<Priority> convert(const hal::V1_3::Priority& priority);
    52  GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities);
    53  GeneralResult<Model> convert(const hal::V1_3::Model& model);
    54  GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc);
    55  GeneralResult<Request> convert(const hal::V1_3::Request& request);
    56  GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint);
    57  GeneralResult<OptionalDuration> convert(
    59  GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus);
    61  GeneralResult<SharedHandle> convert(const hardware::hidl_handle& handle);
    62  GeneralResult<std::vector<BufferRole>> convert(
    [all …]
|
D | Utils.h |
    46  const auto maybeCanonical = nn::convert(halObject);    in validate()
    73  -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    74  return convert(NN_TRY(nn::convert(nonCanonicalObject)));
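
The Conversions.h declarations above all return GeneralResult<T>, and the Utils.h hits show the two ways they are consumed: validate() converts the HAL object to its canonical form and checks for an error, while the round trip convert(NN_TRY(nn::convert(...))) re-encodes a canonical object for this HAL version, propagating any conversion failure. Below is a minimal, self-contained sketch of that flow; Result, toCanonical and toHal are simplified stand-ins for nn::GeneralResult, nn::convert and utils::convert, not the real NNAPI definitions.

// Sketch only: simplified stand-ins for GeneralResult / nn::convert / NN_TRY.
#include <optional>
#include <string>

template <typename T>
struct Result {                      // stand-in for nn::GeneralResult<T>
    std::optional<T> value;
    std::string error;
    bool ok() const { return value.has_value(); }
};

struct HalRequest { int numInputs; };        // hypothetical hal::V1_3-style type
struct CanonicalRequest { int numInputs; };  // hypothetical canonical nn:: type

// Stand-in for nn::convert(halObject): HAL -> canonical, may fail.
Result<CanonicalRequest> toCanonical(const HalRequest& request) {
    if (request.numInputs < 0) return {std::nullopt, "negative input count"};
    return {CanonicalRequest{request.numInputs}, {}};
}

// Stand-in for utils::convert(canonicalObject): canonical -> HAL.
HalRequest toHal(const CanonicalRequest& request) {
    return HalRequest{request.numInputs};
}

// The validate()/convert() pattern from Utils.h: convert to canonical, bail out
// on error (which is what NN_TRY does), then re-encode for the target HAL version.
Result<HalRequest> roundTrip(const HalRequest& halObject) {
    const auto maybeCanonical = toCanonical(halObject);  // like nn::convert(halObject)
    if (!maybeCanonical.ok()) return {std::nullopt, maybeCanonical.error};
    return {toHal(*maybeCanonical.value), {}};           // like convert(NN_TRY(...))
}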
|
/hardware/interfaces/keymaster/4.1/support/ |
D | Keymaster3.cpp |
    29  V4_0::ErrorCode convert(V3_0::ErrorCode error) {    in convert() function
    33  V3_0::KeyPurpose convert(KeyPurpose purpose) {    in convert() function
    37  V3_0::KeyFormat convert(KeyFormat purpose) {    in convert() function
    41  V3_0::KeyParameter convert(const KeyParameter& param) {    in convert() function
    50  KeyParameter convert(const V3_0::KeyParameter& param) {    in convert() function
    59  hidl_vec<V3_0::KeyParameter> convert(const hidl_vec<KeyParameter>& params) {    in convert() function
    62  converted[i] = convert(params[i]);    in convert()
    67  hidl_vec<KeyParameter> convert(const hidl_vec<V3_0::KeyParameter>& params) {    in convert() function
    70  converted[i] = convert(params[i]);    in convert()
    85  converted[i] = convert(params[i]);    in convertAndAddAuthToken()
    [all …]
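
These Keymaster hits show the overload-per-type conversions between the 4.x and 3.0 HAL types, plus the vector case that converts element by element into a hidl_vec. A small sketch of that vector idiom follows; ParamV3/ParamV4 are hypothetical stand-ins rather than the real KeyParameter mapping.

// Sketch of the element-wise hidl_vec conversion loop seen in Keymaster3.cpp.
#include <cstdint>
#include <hidl/HidlSupport.h>

using ::android::hardware::hidl_vec;

struct ParamV4 { uint32_t tag; uint64_t value; };  // stand-in for the V4_0 parameter
struct ParamV3 { uint32_t tag; uint64_t value; };  // stand-in for the V3_0 parameter

// Per-element converter (illustrative; the real one maps the versioned KeyParameter fields).
ParamV3 convert(const ParamV4& param) {
    return ParamV3{param.tag, param.value};
}

// Vector converter: size the destination once, then reuse the scalar overload,
// mirroring the `converted[i] = convert(params[i])` lines above.
hidl_vec<ParamV3> convert(const hidl_vec<ParamV4>& params) {
    hidl_vec<ParamV3> converted;
    converted.resize(params.size());
    for (size_t i = 0; i < params.size(); ++i) {
        converted[i] = convert(params[i]);
    }
    return converted;
}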
|
/hardware/interfaces/neuralnetworks/aidl/utils/include/nnapi/hal/aidl/ |
D | Conversions.h |
    112  GeneralResult<Capabilities> convert(const aidl_hal::Capabilities& capabilities);
    113  GeneralResult<DeviceType> convert(const aidl_hal::DeviceType& deviceType);
    114  GeneralResult<ErrorStatus> convert(const aidl_hal::ErrorStatus& errorStatus);
    115  GeneralResult<ExecutionPreference> convert(
    117  GeneralResult<SharedMemory> convert(const aidl_hal::Memory& memory);
    118  GeneralResult<Model> convert(const aidl_hal::Model& model);
    119  GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType);
    120  GeneralResult<Priority> convert(const aidl_hal::Priority& priority);
    121  GeneralResult<Request> convert(const aidl_hal::Request& request);
    122  GeneralResult<Timing> convert(const aidl_hal::Timing& timing);
    [all …]
|
D | Utils.h |
    54  const auto maybeCanonical = nn::convert(halObject);    in validate()
    81  -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    82  return convert(NN_TRY(nn::convert(nonCanonicalObject)));
    102  if (const ::android::nn::ErrorStatus canonical = ::android::nn::convert(status).value_or( \
|
/hardware/interfaces/neuralnetworks/1.2/utils/include/nnapi/hal/1.2/ |
D | Conversions.h |
    49  GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType);
    50  GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities);
    51  GeneralResult<Model> convert(const hal::V1_2::Model& model);
    52  GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming);
    53  GeneralResult<Timing> convert(const hal::V1_2::Timing& timing);
    54  GeneralResult<SharedMemory> convert(const hardware::hidl_memory& memory);
    56  GeneralResult<std::vector<Extension>> convert(
    58  GeneralResult<std::vector<SharedHandle>> convert(
    60  GeneralResult<std::vector<OutputShape>> convert(
    89  nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType);
    [all …]
|
D | Utils.h |
    46  const auto maybeCanonical = nn::convert(halObject);    in validate()
    73  -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    74  return convert(NN_TRY(nn::convert(nonCanonicalObject)));
|
/hardware/interfaces/neuralnetworks/1.1/utils/include/nnapi/hal/1.1/ |
D | Conversions.h |
    34  GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities);
    35  GeneralResult<Model> convert(const hal::V1_1::Model& model);
    36  GeneralResult<ExecutionPreference> convert(
    50  nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
    51  nn::GeneralResult<Model> convert(const nn::Model& model);
    52  nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference);
    54  nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
    55  nn::GeneralResult<V1_0::Request> convert(const nn::Request& request);
    56  nn::GeneralResult<V1_0::ErrorStatus> convert(const nn::ErrorStatus& status);
|
D | Utils.h |
    37  const auto maybeCanonical = nn::convert(halObject);    in validate()
    64  -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    65  return convert(NN_TRY(nn::convert(nonCanonicalObject)));
|
/hardware/interfaces/neuralnetworks/aidl/utils/src/ |
D | PreparedModel.cpp |
    47  return std::make_pair(NN_TRY(nn::convert(outputShapes)), NN_TRY(nn::convert(timing)));    in convertExecutionResults()
    54  return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));    in convertFencedExecutionResults()
    61  nn::convert(result.outputShapes).value_or(std::vector<nn::OutputShape>{});    in handleExecutionResult()
    79  resultSyncFence = nn::SyncFence::create(NN_TRY(nn::convert(result.syncFence))).value();    in handleFencedExecutionResult()
    141  const auto aidlRequest = NN_TRY(convert(requestInShared));    in execute()
    142  const auto aidlMeasure = NN_TRY(convert(measure));    in execute()
    143  const auto aidlDeadline = NN_TRY(convert(deadline));    in execute()
    144  const auto aidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));    in execute()
    161  auto aidlHints = NN_TRY(convert(hints));    in executeInternal()
    162  auto aidlExtensionPrefix = NN_TRY(convert(extensionNameToPrefix));    in executeInternal()
    [all …]
|
D | Device.cpp |
    51  nn::GeneralResult<std::vector<std::shared_ptr<IPreparedModel>>> convert(    in convert() function
    72  return nn::convert(capabilities);    in getCapabilitiesFrom()
    88  return nn::convert(deviceType);    in getDeviceTypeFrom()
    97  return nn::convert(supportedExtensions);    in getSupportedExtensionsFrom()
    206  const auto aidlModel = NN_TRY(convert(modelInShared));    in getSupportedOperations()
    226  const auto aidlModel = NN_TRY(convert(modelInShared));    in prepareModel()
    227  const auto aidlPreference = NN_TRY(convert(preference));    in prepareModel()
    228  const auto aidlPriority = NN_TRY(convert(priority));    in prepareModel()
    229  const auto aidlDeadline = NN_TRY(convert(deadline));    in prepareModel()
    230  auto aidlModelCache = NN_TRY(convert(modelCache));    in prepareModel()
    [all …]
|
D | Conversions.cpp |
    544  GeneralResult<Capabilities> convert(const aidl_hal::Capabilities& capabilities) {    in convert() function
    548  GeneralResult<DeviceType> convert(const aidl_hal::DeviceType& deviceType) {    in convert() function
    552  GeneralResult<ErrorStatus> convert(const aidl_hal::ErrorStatus& errorStatus) {    in convert() function
    556  GeneralResult<ExecutionPreference> convert(    in convert() function
    561  GeneralResult<SharedMemory> convert(const aidl_hal::Memory& operand) {    in convert() function
    565  GeneralResult<Model> convert(const aidl_hal::Model& model) {    in convert() function
    569  GeneralResult<OperandType> convert(const aidl_hal::OperandType& operandType) {    in convert() function
    573  GeneralResult<Priority> convert(const aidl_hal::Priority& priority) {    in convert() function
    577  GeneralResult<Request> convert(const aidl_hal::Request& request) {    in convert() function
    581  GeneralResult<Timing> convert(const aidl_hal::Timing& timing) {    in convert() function
    [all …]
|
/hardware/interfaces/neuralnetworks/1.3/utils/src/ |
D | Device.cpp |
    57  nn::GeneralResult<hidl_vec<sp<IPreparedModel>>> convert(    in convert() function
    76  return nn::convert(capabilities);    in capabilitiesCallback()
    177  const auto hidlModel = NN_TRY(convert(modelInShared));    in getSupportedOperations()
    198  const auto hidlModel = NN_TRY(convert(modelInShared));    in prepareModel()
    199  const auto hidlPreference = NN_TRY(convert(preference));    in prepareModel()
    200  const auto hidlPriority = NN_TRY(convert(priority));    in prepareModel()
    201  const auto hidlDeadline = NN_TRY(convert(deadline));    in prepareModel()
    202  const auto hidlModelCache = NN_TRY(convert(modelCache));    in prepareModel()
    203  const auto hidlDataCache = NN_TRY(convert(dataCache));    in prepareModel()
    221  const auto hidlDeadline = NN_TRY(convert(deadline));    in prepareModelFromCache()
    [all …]
|
D | PreparedModel.cpp |
    54  return std::make_pair(NN_TRY(nn::convert(timingLaunched)), NN_TRY(nn::convert(timingFenced)));    in convertFencedExecutionCallbackResults()
    64  auto sharedHandle = NN_TRY(nn::convert(syncFence));    in fencedExecutionCallback()
    148  const auto hidlRequest = NN_TRY(convert(requestInShared));    in execute()
    149  const auto hidlMeasure = NN_TRY(convert(measure));    in execute()
    150  const auto hidlDeadline = NN_TRY(convert(deadline));    in execute()
    151  const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));    in execute()
    192  const auto hidlRequest = NN_TRY(convert(requestInShared));    in executeFenced()
    194  const auto hidlMeasure = NN_TRY(convert(measure));    in executeFenced()
    195  const auto hidlDeadline = NN_TRY(convert(deadline));    in executeFenced()
    196  const auto hidlLoopTimeoutDuration = NN_TRY(convert(loopTimeoutDuration));    in executeFenced()
    [all …]
|
D | Conversions.cpp |
    338  GeneralResult<Priority> convert(const hal::V1_3::Priority& priority) {    in convert() function
    342  GeneralResult<Capabilities> convert(const hal::V1_3::Capabilities& capabilities) {    in convert() function
    346  GeneralResult<Model> convert(const hal::V1_3::Model& model) {    in convert() function
    350  GeneralResult<BufferDesc> convert(const hal::V1_3::BufferDesc& bufferDesc) {    in convert() function
    354  GeneralResult<Request> convert(const hal::V1_3::Request& request) {    in convert() function
    358  GeneralResult<OptionalTimePoint> convert(const hal::V1_3::OptionalTimePoint& optionalTimePoint) {    in convert() function
    362  GeneralResult<OptionalDuration> convert(    in convert() function
    367  GeneralResult<ErrorStatus> convert(const hal::V1_3::ErrorStatus& errorStatus) {    in convert() function
    371  GeneralResult<SharedHandle> convert(const hardware::hidl_handle& handle) {    in convert() function
    375  GeneralResult<std::vector<BufferRole>> convert(    in convert() function
    [all …]
|
/hardware/interfaces/neuralnetworks/1.0/utils/include/nnapi/hal/1.0/ |
D | Utils.h |
    35  const auto maybeCanonical = nn::convert(halObject);    in validate()
    62  -> decltype(convert(nn::convert(nonCanonicalObject).value())) {
    63  return convert(NN_TRY(nn::convert(nonCanonicalObject)));
|
D | Conversions.h |
    47  GeneralResult<DeviceStatus> convert(const hal::V1_0::DeviceStatus& deviceStatus);
    48  GeneralResult<Capabilities> convert(const hal::V1_0::Capabilities& capabilities);
    49  GeneralResult<Model> convert(const hal::V1_0::Model& model);
    50  GeneralResult<Request> convert(const hal::V1_0::Request& request);
    51  GeneralResult<ErrorStatus> convert(const hal::V1_0::ErrorStatus& status);
    77  nn::GeneralResult<DeviceStatus> convert(const nn::DeviceStatus& deviceStatus);
    78  nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities);
    79  nn::GeneralResult<Model> convert(const nn::Model& model);
    80  nn::GeneralResult<Request> convert(const nn::Request& request);
    81  nn::GeneralResult<ErrorStatus> convert(const nn::ErrorStatus& status);
|
/hardware/interfaces/neuralnetworks/utils/adapter/hidl/src/ |
D | PreparedModel.cpp |
    50  auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {    in convertInput()
    51  auto result = nn::convert(object);    in convertInput()
    70  V1_3::utils::convert(code).value_or(V1_3::ErrorStatus::GENERAL_FAILURE);    in getExecutionInfo()
    76  const auto hidlTimingLaunched = V1_3::utils::convert(timingLaunched).value();    in getExecutionInfo()
    77  const auto hidlTimingFenced = V1_3::utils::convert(timingFenced).value();    in getExecutionInfo()
    91  const auto hidlStatus = V1_0::utils::convert(status).value();    in notify()
    102  const auto hidlStatus = V1_2::utils::convert(status).value();    in notify()
    103  const auto hidlOutputShapes = V1_2::utils::convert(outputShapes).value();    in notify()
    104  const auto hidlTiming = V1_2::utils::convert(timing).value();    in notify()
    115  const auto hidlStatus = V1_3::utils::convert(status).value();    in notify()
    [all …]
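
The adapter sources wrap incoming HAL arguments with a convertInput() template whose return type is deduced, via decltype, from whichever nn::convert overload matches the argument, so one helper serves every input type. Only the first two lines of its body appear in these hits; the sketch below is a self-contained analogue with simplified stand-in types, and the error-remapping step is an assumption about intent, not a quote of the real code.

// Self-contained sketch of the convertInput() idiom; Expected, ErrorStatus and the
// convert() overload are stand-ins for nn::GeneralResult, nn::ErrorStatus and nn::convert.
#include <utility>

enum class ErrorStatus { NONE, INVALID_ARGUMENT, GENERAL_FAILURE };

template <typename T>
struct Expected {                    // stand-in for nn::GeneralResult<T>
    bool hasValue = false;
    T value{};
    ErrorStatus error = ErrorStatus::NONE;
};

struct HalRequest { int id; };        // hypothetical HAL input type
struct CanonicalRequest { int id; };  // hypothetical canonical type

// One of many convert() overloads (stand-in for nn::convert).
Expected<CanonicalRequest> convert(const HalRequest& request) {
    return {true, CanonicalRequest{request.id}, ErrorStatus::NONE};
}

// convertInput(): return type is deduced from the matching convert() overload.
// On failure, downgrade the error to INVALID_ARGUMENT so the adapter reports a
// bad input rather than a driver fault (assumed intent; not shown in the hits).
template <typename Type>
auto convertInput(const Type& object) -> decltype(convert(std::declval<Type>())) {
    auto result = convert(object);
    if (!result.hasValue) {
        result.error = ErrorStatus::INVALID_ARGUMENT;
    }
    return result;
}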
|
D | Device.cpp |
    55  auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {    in convertInput()
    56  auto result = nn::convert(object);    in convertInput()
    75  const auto hidlStatus = V1_0::utils::convert(status).value();    in notify()
    86  const auto hidlStatus = V1_2::utils::convert(status).value();    in notify()
    98  const auto hidlStatus = V1_3::utils::convert(status).value();    in notify()
    324  const auto capabilities = V1_0::utils::convert(kDevice->getCapabilities()).value();    in getCapabilities()
    330  const auto capabilities = V1_1::utils::convert(kDevice->getCapabilities()).value();    in getCapabilities_1_1()
    336  const auto capabilities = V1_2::utils::convert(kDevice->getCapabilities()).value();    in getCapabilities_1_2()
    342  const auto capabilities = V1_3::utils::convert(kDevice->getCapabilities()).value();    in getCapabilities_1_3()
    353  const auto maybeDeviceType = V1_2::utils::convert(kDevice->getType());    in getType()
    [all …]
|
D | Buffer.cpp |
    35  auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {    in convertInput()
    36  auto result = nn::convert(object);    in convertInput()
    67  return V1_3::utils::convert(code).value();    in copyTo()
    78  return V1_3::utils::convert(code).value();    in copyFrom()
|
/hardware/interfaces/neuralnetworks/1.1/utils/src/ |
D | Conversions.cpp |
    137  GeneralResult<Capabilities> convert(const hal::V1_1::Capabilities& capabilities) {    in convert() function
    141  GeneralResult<Model> convert(const hal::V1_1::Model& model) {    in convert() function
    145  GeneralResult<ExecutionPreference> convert(    in convert() function
    259  nn::GeneralResult<Capabilities> convert(const nn::Capabilities& capabilities) {    in convert() function
    263  nn::GeneralResult<Model> convert(const nn::Model& model) {    in convert() function
    267  nn::GeneralResult<ExecutionPreference> convert(const nn::ExecutionPreference& executionPreference) {    in convert() function
    271  nn::GeneralResult<V1_0::DeviceStatus> convert(const nn::DeviceStatus& deviceStatus) {    in convert() function
    272  return V1_0::utils::convert(deviceStatus);    in convert()
    275  nn::GeneralResult<V1_0::Request> convert(const nn::Request& request) {    in convert() function
    276  return V1_0::utils::convert(request);    in convert()
    [all …]
|
/hardware/interfaces/neuralnetworks/utils/adapter/aidl/src/ |
D | PreparedModel.cpp |
    56  *timingLaunched = utils::convert(nnTimingLaunched).value();    in getExecutionInfo()
    57  *timingFenced = utils::convert(nnTimingFenced).value();    in getExecutionInfo()
    63  const auto aidlStatus = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);    in getExecutionInfo()
    76  auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {    in convertInput()
    77  auto result = nn::convert(object);    in convertInput()
    140  .outputShapes = utils::convert(outputShapes).value(),    in executeSynchronously()
    146  .outputShapes = utils::convert(outputShapes).value(),    in executeSynchronously()
    147  .timing = utils::convert(timing).value()};    in executeSynchronously()
    204  .outputShapes = utils::convert(outputShapes).value(),    in executeSynchronously()
    210  .outputShapes = utils::convert(outputShapes).value(),    in executeSynchronously()
    [all …]
|
D | Buffer.cpp |
    32  auto convertInput(const Type& object) -> decltype(nn::convert(std::declval<Type>())) {    in convertInput()
    33  auto result = nn::convert(object);    in convertInput()
    70  const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);    in copyTo()
    81  const auto aidlCode = utils::convert(code).value_or(ErrorStatus::GENERAL_FAILURE);    in copyFrom()
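
When the adapters map a canonical error code back to the binder-visible ErrorStatus, a conversion that cannot be represented falls back to GENERAL_FAILURE via value_or() rather than aborting the call, as the Buffer.cpp and PreparedModel.cpp hits above show. A small stand-alone sketch of that idiom, with stand-in enums rather than the real AIDL/NN types:

// Sketch of the `convert(code).value_or(ErrorStatus::GENERAL_FAILURE)` idiom;
// the enums and the convert() mapping are illustrative stand-ins.
#include <optional>

enum class AidlErrorStatus { NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE, INVALID_ARGUMENT };
enum class CanonicalErrorStatus { NONE, DEVICE_UNAVAILABLE, GENERAL_FAILURE, INVALID_ARGUMENT, DEAD_OBJECT };

// Stand-in converter: DEAD_OBJECT has no counterpart in this sketch, so it fails.
std::optional<AidlErrorStatus> convert(CanonicalErrorStatus status) {
    switch (status) {
        case CanonicalErrorStatus::NONE: return AidlErrorStatus::NONE;
        case CanonicalErrorStatus::DEVICE_UNAVAILABLE: return AidlErrorStatus::DEVICE_UNAVAILABLE;
        case CanonicalErrorStatus::GENERAL_FAILURE: return AidlErrorStatus::GENERAL_FAILURE;
        case CanonicalErrorStatus::INVALID_ARGUMENT: return AidlErrorStatus::INVALID_ARGUMENT;
        default: return std::nullopt;  // unmappable code
    }
}

// Mirrors the lines above: never crash on an unmappable status, report GENERAL_FAILURE instead.
AidlErrorStatus toBinderStatus(CanonicalErrorStatus code) {
    return convert(code).value_or(AidlErrorStatus::GENERAL_FAILURE);
}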
|
/hardware/interfaces/neuralnetworks/1.2/utils/src/ |
D | Conversions.cpp |
    278  GeneralResult<DeviceType> convert(const hal::V1_2::DeviceType& deviceType) {    in convert() function
    282  GeneralResult<Capabilities> convert(const hal::V1_2::Capabilities& capabilities) {    in convert() function
    286  GeneralResult<Model> convert(const hal::V1_2::Model& model) {    in convert() function
    290  GeneralResult<MeasureTiming> convert(const hal::V1_2::MeasureTiming& measureTiming) {    in convert() function
    294  GeneralResult<Timing> convert(const hal::V1_2::Timing& timing) {    in convert() function
    298  GeneralResult<SharedMemory> convert(const hardware::hidl_memory& memory) {    in convert() function
    302  GeneralResult<std::vector<Extension>> convert(const hidl_vec<hal::V1_2::Extension>& extensions) {    in convert() function
    306  GeneralResult<std::vector<SharedHandle>> convert(const hidl_vec<hidl_handle>& handles) {    in convert() function
    310  GeneralResult<std::vector<OutputShape>> convert(    in convert() function
    569  nn::GeneralResult<DeviceType> convert(const nn::DeviceType& deviceType) {    in convert() function
    [all …]
|
D | Device.cpp |
    53  return nn::convert(capabilities);    in capabilitiesCallback()
    65  return nn::convert(deviceType);    in deviceTypeCallback()
    71  return nn::convert(extensions);    in supportedExtensionsCallback()
    226  const auto hidlModel = NN_TRY(convert(modelInShared));    in getSupportedOperations()
    247  const auto hidlModel = NN_TRY(convert(modelInShared));    in prepareModel()
    248  const auto hidlPreference = NN_TRY(convert(preference));    in prepareModel()
    249  const auto hidlModelCache = NN_TRY(convert(modelCache));    in prepareModel()
    250  const auto hidlDataCache = NN_TRY(convert(dataCache));    in prepareModel()
    267  const auto hidlModelCache = NN_TRY(convert(modelCache));    in prepareModelFromCache()
    268  const auto hidlDataCache = NN_TRY(convert(dataCache));    in prepareModelFromCache()
|