/packages/modules/NeuralNetworks/shim_and_sl/ |
D | ShimDeviceManager.cpp |
      84  const std::shared_ptr<const NnApiSupportLibrary>& nnapi) {  in getNamedDevices() argument
      86  if (nnapi->getFL5()->ANeuralNetworks_getDeviceCount(&numDevices) != ANEURALNETWORKS_NO_ERROR) {  in getNamedDevices()
      94  if (nnapi->getFL5()->ANeuralNetworks_getDevice(i, &device) != ANEURALNETWORKS_NO_ERROR) {  in getNamedDevices()
     100  if (nnapi->getFL5()->ANeuralNetworksDevice_getName(device, &name) !=  in getNamedDevices()
     135  std::shared_ptr<const NnApiSupportLibrary> nnapi;  in registerDevices() local
     138  nnapi = std::make_unique<NnApiSupportLibrary>(  in registerDevices()
     142  nnapi = std::make_unique<NnApiSupportLibrary>(  in registerDevices()
     146  nnapi = std::make_unique<NnApiSupportLibrary>(  in registerDevices()
     150  nnapi = std::make_unique<NnApiSupportLibrary>(  in registerDevices()
     154  CHECK_NE(nnapi, nullptr);  in registerDevices()
     [all …]
|
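The ShimDeviceManager matches above show the shim loading a vendor support library and then enumerating its devices through the FL5 function-pointer table. A minimal sketch of that enumeration pattern follows; the header names are assumptions, while the entry points (`ANeuralNetworks_getDeviceCount`, `ANeuralNetworks_getDevice`, `ANeuralNetworksDevice_getName`) are the ones visible in the listing:

```cpp
#include <cstdint>
#include <string>
#include <vector>

#include "NeuralNetworksSupportLibraryImpl.h"  // assumed: NnApiSLDriverImplFL5, ANEURALNETWORKS_NO_ERROR
#include "SupportLibrary.h"                    // assumed: NnApiSupportLibrary::getFL5()

// Collect the names of every device exposed by an already-loaded support library,
// mirroring ShimDeviceManager.cpp::getNamedDevices().
std::vector<std::string> listSlDeviceNames(const NnApiSupportLibrary* nnapi) {
    std::vector<std::string> names;
    uint32_t numDevices = 0;
    if (nnapi->getFL5()->ANeuralNetworks_getDeviceCount(&numDevices) != ANEURALNETWORKS_NO_ERROR) {
        return names;  // enumeration failed: report no devices
    }
    for (uint32_t i = 0; i < numDevices; ++i) {
        ANeuralNetworksDevice* device = nullptr;
        if (nnapi->getFL5()->ANeuralNetworks_getDevice(i, &device) != ANEURALNETWORKS_NO_ERROR) {
            continue;  // skip devices that cannot be retrieved
        }
        const char* name = nullptr;
        if (nnapi->getFL5()->ANeuralNetworksDevice_getName(device, &name) == ANEURALNETWORKS_NO_ERROR &&
            name != nullptr) {
            names.emplace_back(name);
        }
    }
    return names;
}
```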
D | ShimDevice.cpp |
     102  Capabilities getCapabilities(const NnApiSupportLibrary* nnapi, ANeuralNetworksDevice* device) {  in getCapabilities() argument
     106  nnapi->getFL5()->SL_ANeuralNetworksDevice_getPerformanceInfo(  in getCapabilities()
     110  nnapi->getFL5()->SL_ANeuralNetworksDevice_getPerformanceInfo(  in getCapabilities()
     114  nnapi->getFL5()->SL_ANeuralNetworksDevice_getPerformanceInfo(  in getCapabilities()
     118  nnapi->getFL5()->SL_ANeuralNetworksDevice_getPerformanceInfo(  in getCapabilities()
     130  nnapi->getFL5()->SL_ANeuralNetworksDevice_forEachOperandPerformanceInfo(  in getCapabilities()
     136  NumberOfCacheFiles getNumberOfCacheFilesNeeded(const NnApiSupportLibrary* nnapi,  in getNumberOfCacheFilesNeeded() argument
     140  nnapi->getFL5()->SL_ANeuralNetworksDevice_getNumberOfCacheFilesNeeded(  in getNumberOfCacheFilesNeeded()
     148  std::vector<Extension> getVendorExtensions(const NnApiSupportLibrary* nnapi,  in getVendorExtensions() argument
     151  nnapi->getFL5()->SL_ANeuralNetworksDevice_getVendorExtensionCount(device,  in getVendorExtensions()
     [all …]
|
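The ShimDevice matches show the shim building device capabilities from SL-only queries. Below is a hedged sketch of one such query; the `SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR` constant and the `execTime`/`powerUsage` fields of `SL_ANeuralNetworksPerformanceInfo` are assumptions about the support-library type header, not something visible in the listing:

```cpp
#include "NeuralNetworksSupportLibraryImpl.h"  // assumed: SL_* types, constants, and entry points
#include "SupportLibrary.h"                    // assumed: NnApiSupportLibrary::getFL5()

// Query the relaxed-float32 scalar performance of one device, in the spirit of
// ShimDevice.cpp::getCapabilities(). The return status of the SL call is ignored here
// to keep the sketch minimal.
SL_ANeuralNetworksPerformanceInfo queryRelaxedScalarPerformance(const NnApiSupportLibrary* nnapi,
                                                                ANeuralNetworksDevice* device) {
    SL_ANeuralNetworksPerformanceInfo info{};
    nnapi->getFL5()->SL_ANeuralNetworksDevice_getPerformanceInfo(
            device, SL_ANEURALNETWORKS_CAPABILITIES_PERFORMANCE_RELAXED_SCALAR, &info);
    return info;  // execTime / powerUsage describe the device's self-reported performance
}
```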
D | ShimConverter.cpp |
      47  const NnApiSupportLibrary* nnapi,  in convertSubgraphFromHAL() argument
      63  ::android::nn::sl_wrapper::Model resultModel(nnapi);  in convertSubgraphFromHAL()
     168  auto subgraph = convertSubgraphFromHAL(nnapi, memoryPools, model, allModels,  in convertSubgraphFromHAL()
     369  std::optional<ShimConvertedModel> convertFromHAL(const NnApiSupportLibrary* nnapi,  in convertFromHAL() argument
     389  std::unique_ptr<::android::nn::sl_wrapper::Memory> memory = convertFromHAL(nnapi, pool);  in convertFromHAL()
     406  if (convertSubgraphFromHAL(nnapi, memoryPools, model, &allModels, i, *copiedOperandValues,  in convertFromHAL()
     429  const NnApiSupportLibrary* nnapi, const neuralnetworks::Memory& pool) {  in convertFromHAL() argument
     438  nnapi, size, PROT_READ | PROT_WRITE, fd, 0, /*ownsFd=*/false);  in convertFromHAL()
     452  nnapi, size, prot, fd, offset, /*ownsFd=*/false);  in convertFromHAL()
     498  std::make_unique<::android::nn::sl_wrapper::Memory>(nnapi, ahwb,  in convertFromHAL()
|
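The converter wraps each HAL memory pool in an `sl_wrapper::Memory` without taking ownership of the underlying fd or AHardwareBuffer (the `/*ownsFd=*/false` calls at lines 438 and 452, and the AHWB wrap at line 498). A small sketch of the two wrapping paths, using the constructors visible in the SupportLibraryWrapper.h matches further down; the header path and the `isValid()` check are assumptions about the wrapper:

```cpp
#include <android/hardware_buffer.h>
#include <sys/mman.h>

#include <memory>

#include "SupportLibraryWrapper.h"  // assumed: ::android::nn::sl_wrapper::Memory

using ::android::nn::sl_wrapper::Memory;

// Borrow a shared-memory fd owned by the caller (the HAL memory pool keeps the fd alive).
std::unique_ptr<Memory> wrapSharedMemoryPool(const NnApiSupportLibrary* nnapi, int fd, size_t size,
                                             size_t offset, int prot) {
    auto memory = std::make_unique<Memory>(nnapi, size, prot, fd, offset, /*ownsFd=*/false);
    return memory->isValid() ? std::move(memory) : nullptr;
}

// Borrow an AHardwareBuffer owned by the caller, mirroring the call at line 498.
std::unique_ptr<Memory> wrapHardwareBufferPool(const NnApiSupportLibrary* nnapi,
                                               AHardwareBuffer* ahwb, size_t size) {
    auto memory = std::make_unique<Memory>(nnapi, ahwb, /*ownAHWB=*/false, size);
    return memory->isValid() ? std::move(memory) : nullptr;
}
```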
D | ShimPreparedModel.cpp |
     257  const std::shared_ptr<const NnApiSupportLibrary>& nnapi,  in executeFencedInternal() argument
     272  nnapi->getFL5()->ANeuralNetworksEvent_createFromSyncFenceFd(  in executeFencedInternal()
     278  const auto guard = ::android::base::make_scope_guard([nnapi, deps] {  in executeFencedInternal()
     281  nnapi->getFL5()->ANeuralNetworksEvent_free(const_cast<ANeuralNetworksEvent*>(dep));  in executeFencedInternal()
     288  Event e(nnapi.get());  in executeFencedInternal()
     542  std::shared_ptr<const NnApiSupportLibrary> nnapi,
     585  std::shared_ptr<const NnApiSupportLibrary> nnapi,  in ShimExecution() argument
     589  : mNnapi(std::move(nnapi)),  in ShimExecution()
|
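executeFencedInternal() above turns the caller's sync-fence fds into `ANeuralNetworksEvent` dependencies and frees them with a scope guard. A sketch of that conversion, under the same assumed SL wrapper headers, with error handling reduced to freeing whatever was already created:

```cpp
#include <memory>
#include <vector>

#include "SupportLibrary.h"  // assumed: NnApiSupportLibrary::getFL5(), ANEURALNETWORKS_NO_ERROR

// Convert sync-fence fds into events the support library can wait on, mirroring
// ShimPreparedModel.cpp::executeFencedInternal(). Returns an empty vector on failure.
std::vector<const ANeuralNetworksEvent*> createWaitForDeps(
        const std::shared_ptr<const NnApiSupportLibrary>& nnapi, const std::vector<int>& syncFds) {
    std::vector<const ANeuralNetworksEvent*> deps;
    deps.reserve(syncFds.size());
    for (int fd : syncFds) {
        ANeuralNetworksEvent* event = nullptr;
        if (nnapi->getFL5()->ANeuralNetworksEvent_createFromSyncFenceFd(fd, &event) !=
            ANEURALNETWORKS_NO_ERROR) {
            // Clean up the events created so far before reporting failure.
            for (const auto* dep : deps) {
                nnapi->getFL5()->ANeuralNetworksEvent_free(const_cast<ANeuralNetworksEvent*>(dep));
            }
            return {};
        }
        deps.push_back(event);
    }
    return deps;
}

// Usage, following line 278 (needs <android-base/scope_guard.h>): release the
// dependencies when the surrounding scope exits.
// const auto guard = ::android::base::make_scope_guard([nnapi, deps] {
//     for (const auto* dep : deps) {
//         nnapi->getFL5()->ANeuralNetworksEvent_free(const_cast<ANeuralNetworksEvent*>(dep));
//     }
// });
```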
/packages/modules/NeuralNetworks/runtime/include/ |
D | NeuralNetworksWrapper.h |
     188  Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemory* memory)  in Memory() argument
     189  : mNnApi(nnapi), mMemory(memory) {}  in Memory()
     191  Memory(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd, size_t offset)  argument
     192  : mNnApi(nnapi) {
     202  Memory(const NnApiSupportLibrary* nnapi, AHardwareBuffer* buffer) : mNnApi(nnapi) {  argument
     254  Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {  argument
     391  Event(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {}  argument
     392  Event(const NnApiSupportLibrary* nnapi, int syncFd) : mNnApi(nnapi) {  argument
     466  static std::pair<Result, Compilation> createForDevice(const NnApiSupportLibrary* nnapi,  argument
     469  return createForDevices(nnapi, model, {device});
     [all …]
|
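The wrapper header above gives each object an `NnApiSupportLibrary*` and exposes `Compilation::createForDevice()` (line 466), which forwards to `createForDevices()` with a one-element device list (line 469). A sketch of driving it; `finish()`, `Result::NO_ERROR`, and the exact parameter types of `createForDevice()` are assumptions inferred from the usual test wrapper API and from line 469:

```cpp
#include "NeuralNetworksWrapper.h"  // assumed: wrapper Model/Compilation/Result taking an NnApiSupportLibrary*

// Compile a wrapper Model on one specific device. The wrapper namespace is not shown in
// the listing and is assumed to be imported by the caller.
bool compileForSingleDevice(const NnApiSupportLibrary* nnapi, const Model& model,
                            const ANeuralNetworksDevice* device) {
    auto [result, compilation] = Compilation::createForDevice(nnapi, &model, device);
    return result == Result::NO_ERROR && compilation.finish() == Result::NO_ERROR;
}
```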
/packages/modules/NeuralNetworks/shim_and_sl/include/ |
D | SupportLibraryWrapper.h |
      51  Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemory* memory)  in Memory() argument
      52  : mNnApi(nnapi), mMemory(memory), mSize(0) {}  in Memory()
      55  Memory(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd, size_t offset,
      57  : mNnApi(nnapi), mOwnedFd(ownsFd ? std::optional<int>{fd} : std::nullopt), mSize(size) {  in mNnApi() argument
      63  Memory(const NnApiSupportLibrary* nnapi, AHardwareBuffer* buffer, bool ownAHWB, size_t size)  in Memory() argument
      64  : mNnApi(nnapi), mOwnedAHWB(ownAHWB ? buffer : nullptr), mSize(size) {  in Memory()
      70  Memory(const NnApiSupportLibrary* nnapi, ANeuralNetworksMemoryDesc* desc, size_t size)  in Memory() argument
      71  : mNnApi(nnapi), mSize(size) {  in Memory()
     142  Model(const NnApiSupportLibrary* nnapi) : mNnApi(nnapi) {  in Model() argument
     351  static std::pair<Result, Compilation> createForDevice(const NnApiSupportLibrary* nnapi,  in createForDevice() argument
     [all …]
|
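The sl_wrapper constructors above differ from the runtime wrapper mainly in their explicit ownership flags (`ownsFd`, `ownAHWB`) and tracked size. A short sketch contrasting the two fd ownership modes from the constructor at lines 55-57; the `dup()` call is purely illustrative:

```cpp
#include <sys/mman.h>
#include <unistd.h>

#include "SupportLibraryWrapper.h"  // assumed: ::android::nn::sl_wrapper::Memory

using ::android::nn::sl_wrapper::Memory;

// Wrap the same shared-memory region twice to contrast the ownership flag:
// with ownsFd=false the caller keeps closing rights on fd; with ownsFd=true the wrapper
// records the fd (mOwnedFd in the listing) and closes it when the Memory is destroyed.
void wrapBorrowedAndOwned(const NnApiSupportLibrary* nnapi, int fd, size_t size) {
    Memory borrowed(nnapi, size, PROT_READ | PROT_WRITE, fd, /*offset=*/0, /*ownsFd=*/false);
    Memory owned(nnapi, size, PROT_READ | PROT_WRITE, dup(fd), /*offset=*/0, /*ownsFd=*/true);
    // ... hand either Memory to the support library; only `owned` cleans up its descriptor.
}
```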
D | ShimConverter.h |
      51  std::optional<ShimConvertedModel> convertFromHAL(const NnApiSupportLibrary* nnapi,
      56  const NnApiSupportLibrary* nnapi, const neuralnetworks::Memory& pool);
|
D | ShimPreparedModel.h |
      34  ShimPreparedModel(std::shared_ptr<const NnApiSupportLibrary> nnapi,  in ShimPreparedModel() argument
      40  : mNnapi(nnapi),  in ShimPreparedModel()
|
/packages/modules/NeuralNetworks/runtime/test/ |
D | GeneratedTestUtils.cpp |
      62  static std::unique_ptr<MemoryWithPointer> create(const NnApiSupportLibrary* nnapi,  in create() argument
      84  new MemoryWithPointer(nnapi, size, protect, fd.get(), offset, std::move(mapping)));  in create()
      95  MemoryWithPointer(const NnApiSupportLibrary* nnapi, size_t size, int protect, int fd,  in create() argument
      97  : Memory(nnapi, size, protect, fd, offset), mMapping(std::move(mapping)) {}  in create()
     109  const NnApiSupportLibrary* nnapi, const TestModel& testModel) {  argument
     129  return size == 0 ? nullptr : MemoryWithPointer::create(nnapi, size);
     189  void createModel(const NnApiSupportLibrary* nnapi, const TestModel& testModel,  argument
     197  std::unique_ptr<MemoryWithPointer> memory = createConstantReferenceMemory(nnapi, testModel);
     206  refModels.push_back(Model(nnapi));
|
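MemoryWithPointer above backs test constants with a shared-memory region that is both mmap'd (so the test can write reference data through a pointer) and wrapped as an sl_wrapper::Memory (so the support library can read it). A rough sketch of that setup; `ASharedMemory_create()` is the NDK shared-memory helper, the wrapper header path is assumed, and error handling is simplified:

```cpp
#include <android/sharedmem.h>
#include <sys/mman.h>
#include <unistd.h>

#include <memory>

#include "SupportLibraryWrapper.h"  // assumed: ::android::nn::sl_wrapper::Memory

// Create a shared-memory region of `size` bytes, expose a writable mapping through *outData,
// and wrap the region for the support library. Roughly what MemoryWithPointer::create() does.
std::unique_ptr<::android::nn::sl_wrapper::Memory> createMappedTestMemory(
        const NnApiSupportLibrary* nnapi, size_t size, void** outData) {
    const int fd = ASharedMemory_create("test-constants", size);
    if (fd < 0) return nullptr;
    void* data = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (data == MAP_FAILED) {
        close(fd);
        return nullptr;
    }
    *outData = data;  // caller munmap()s this view when done
    // ownsFd=true: the wrapper closes the descriptor when it is destroyed.
    return std::make_unique<::android::nn::sl_wrapper::Memory>(
            nnapi, size, PROT_READ | PROT_WRITE, fd, /*offset=*/0, /*ownsFd=*/true);
}
```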
D | GeneratedTestUtils.h |
      58  GeneratedModel(const NnApiSupportLibrary* nnapi) : sl_wrapper::Model(nnapi) {}  in GeneratedModel() argument
      87  void createModel(const NnApiSupportLibrary* nnapi, const test_helper::TestModel& testModel,
      89  inline void createModel(const NnApiSupportLibrary* nnapi, const test_helper::TestModel& testModel,  in createModel() argument
      91  createModel(nnapi, testModel, /*testDynamicOutputShape=*/false, model);  in createModel()
|
D | SupportLibraryTestUtils.h |
      39  static std::unique_ptr<TestAshmem> createFrom(const NnApiSupportLibrary* nnapi,  in createFrom() argument
      41  return createFrom(nnapi, buffer.get<void>(), buffer.size());  in createFrom()
      45  static std::unique_ptr<TestAshmem> createFrom(const NnApiSupportLibrary* nnapi,  in createFrom() argument
      59  sl_wrapper::Memory memory(nnapi, length, PROT_READ | PROT_WRITE, fd, 0);  in createFrom()
|
D | TestGpuNnapi.cpp |
     814  auto nnapi = std::make_unique<NnapiExecutor>(input, output);  in create() local
     815  nnapi->initialize(device);  in create()
     816  return nnapi->mIsValid ? std::move(nnapi) : nullptr;  in create()
     961  auto nnapi = NnapiExecutor<dataType>::create(kDevice, mGpuOutput, mNnapiOutput);  in runTest() local
     962  if (nnapi == nullptr) return;  in runTest()
     969  auto [nnapiSuccess, nnapiSyncFd] = nnapi->run(gpuSyncFd);  in runTest()
|
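NnapiExecutor::create() above follows a construct/initialize/validate factory pattern: the executor is only returned if initialization succeeded, otherwise the caller gets nullptr, which runTest() treats as "skip" (line 962). A stripped-down sketch of that shape with hypothetical names:

```cpp
#include <memory>
#include <utility>

struct ANeuralNetworksDevice;  // opaque NNAPI device handle, forward-declared for the sketch

// Hypothetical executor illustrating the create()/initialize()/mIsValid pattern at lines 814-816.
class SketchExecutor {
  public:
    static std::unique_ptr<SketchExecutor> create(ANeuralNetworksDevice* device) {
        auto executor = std::make_unique<SketchExecutor>();
        executor->initialize(device);
        // Hand back the executor only if initialization left it usable; callers treat
        // nullptr as "skip this test".
        return executor->mIsValid ? std::move(executor) : nullptr;
    }

  private:
    void initialize(ANeuralNetworksDevice* device) { mIsValid = (device != nullptr); }
    bool mIsValid = false;
};
```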
/packages/modules/NeuralNetworks/common/types/ |
D | Android.bp |
      72  "include/nnapi",
     118  "include/nnapi",
|
/packages/modules/NeuralNetworks/driver/sample_shim/config/ |
D | android.hardware.neuralnetworks-shell-service-sample.rc | 2 interface aidl android.hardware.neuralnetworks.IDevice/nnapi-sample_sl_updatable
|
/packages/modules/NeuralNetworks/runtime/test/specs/ |
D | visualize_spec.sh | 27 LOG_DIR=$(mktemp -d)/nnapi-spec-html
|
/packages/modules/NeuralNetworks/tools/api/ |
D | Types.t |
      37  #include "nnapi/OperandTypes.h"
      38  #include "nnapi/OperationTypes.h"
      39  #include "nnapi/Result.h"
|
D | generate_api.sh | 31 CANONICALDIR=${ANDROID_BUILD_TOP}/packages/modules/NeuralNetworks/common/types/include/nnapi
|
/packages/modules/NeuralNetworks/runtime/test/fuzzing/ |
D | visualize_random_graph.sh | 27 LOG_DIR=$(mktemp -d)/nnapi-fuzzing-logs
|
/packages/modules/NeuralNetworks/ |
D | TEST_MAPPING | 58 "path": "external/tensorflow/tensorflow/lite/delegates/nnapi"
|
/packages/modules/NeuralNetworks/tools/test_generator/test_harness/ |
D | Android.bp | 47 local_include_dirs: ["include/nnapi"],
|
/packages/modules/NeuralNetworks/tools/ |
D | build_and_run_benchmark.sh | 25 LOGDIR=$(mktemp -d)/nnapi-logs
|