/packages/modules/NeuralNetworks/driver/sample/
D | CanonicalDevice.cpp |
    186  std::vector<RunTimePoolInfo> poolInfos;  in prepareModel() local
    187  if (!setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) {  in prepareModel()
    193  kBufferTracker, std::move(poolInfos));  in prepareModel()
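Taken together, lines 186-193 show the prepare-time pattern in prepareModel(): every constant pool of the canonical Model is mapped into a RunTimePoolInfo up front, and preparation fails if any pool cannot be mapped. Below is a minimal sketch of that pattern using the CpuExecutor.h helper that appears later in this listing; the PreparedModelSketch type and the null return on failure are illustrative assumptions, not the sample driver's actual signatures (the real code also forwards kBufferTracker and other state at line 193, and its failure branch at line 187 is not shown above).

#include <memory>
#include <utility>
#include <vector>

#include "CpuExecutor.h"  // android::nn::RunTimePoolInfo, setRunTimePoolInfosFromCanonicalMemories

using android::nn::Model;
using android::nn::RunTimePoolInfo;

// Hypothetical stand-in for the sample driver's prepared model: it only keeps the
// model and its mapped pools alive (the real class also stores a buffer tracker,
// execution preference, priority, and so on).
struct PreparedModelSketch {
    Model model;
    std::vector<RunTimePoolInfo> poolInfos;
};

std::shared_ptr<PreparedModelSketch> prepareModelSketch(Model model) {
    std::vector<RunTimePoolInfo> poolInfos;
    // Maps every entry of model.pools; returns false (and leaves the vector empty)
    // if any pool cannot be mapped.
    if (!android::nn::setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) {
        return nullptr;  // stand-in for the real error path
    }
    return std::make_shared<PreparedModelSketch>(
            PreparedModelSketch{std::move(model), std::move(poolInfos)});
}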
D | CanonicalPreparedModel.cpp |
    119  std::vector<RunTimePoolInfo> poolInfos)  in PreparedModel() argument
    125  kPoolInfos(std::move(poolInfos)) {  in PreparedModel()
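Lines 119 and 125 are the receiving side of that hand-off: the PreparedModel constructor takes the pool-info vector by value and moves it into a const member (kPoolInfos). A sketch of just that idiom follows; the other constructor parameters of the real class are omitted.

#include <utility>
#include <vector>

#include "CpuExecutor.h"  // android::nn::RunTimePoolInfo

class PoolInfoHolderSketch {
  public:
    // Taking the vector by value lets callers std::move() it in; moving it again
    // into the member means the mapped pools are never copied.
    explicit PoolInfoHolderSketch(std::vector<android::nn::RunTimePoolInfo> poolInfos)
        : kPoolInfos(std::move(poolInfos)) {}

  private:
    const std::vector<android::nn::RunTimePoolInfo> kPoolInfos;
};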
D | CanonicalPreparedModel.h |
    40   std::vector<RunTimePoolInfo> poolInfos);
/packages/modules/NeuralNetworks/driver/sample_hidl/
D | SampleDriver.cpp |
    452  const std::vector<RunTimePoolInfo>& poolInfos, const OptionalTimePoint& deadline,  in asyncExecute() argument
    477  int n = executor.run(uncheckedConvert(model), uncheckedConvert(request), poolInfos,  in asyncExecute()
    508  const std::vector<RunTimePoolInfo>& poolInfos,  in executeBase() argument
    534  std::thread([&model, &driver, preparedModel, &poolInfos, request, measure, driverStart,  in executeBase()
    536  asyncExecute(request, measure, driverStart, model, driver, preparedModel, poolInfos,  in executeBase()
    572  const std::vector<RunTimePoolInfo>& poolInfos,  in executeSynchronouslyBase() argument
    609  int n = executor.run(uncheckedConvert(model), uncheckedConvert(request), poolInfos,  in executeSynchronouslyBase()
    770  const std::vector<RunTimePoolInfo>& poolInfos)  in BurstExecutorWithCache() argument
    771  : mModel(model), mDriver(driver), mModelPoolInfos(poolInfos) {}  in BurstExecutorWithCache()
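The SampleDriver matches cover the execution side. Both asyncExecute() (line 477) and executeSynchronouslyBase() (line 609) feed the model's pool infos into CpuExecutor::run after converting the HAL types with uncheckedConvert(), and executeBase() (lines 534-536) forwards them to asyncExecute() on a separate std::thread, capturing poolInfos by reference. Below is a compressed sketch of the synchronous path, assuming CpuExecutor::run takes the model pools and the freshly mapped request pools as two separate vectors and that canonical types are already in hand; the construction of the executor and the error value are simplifications.

#include <vector>

#include "CpuExecutor.h"  // android::nn::CpuExecutor, RunTimePoolInfo,
                          // setRunTimePoolInfosFromMemoryPools

using android::nn::CpuExecutor;
using android::nn::Model;
using android::nn::Request;
using android::nn::RunTimePoolInfo;

// Hypothetical synchronous execution body: map the request pools for this call,
// then run the CPU executor against the pools mapped at prepare time.
int executeSketch(const Model& model, const Request& request,
                  const std::vector<RunTimePoolInfo>& modelPoolInfos) {
    std::vector<RunTimePoolInfo> requestPoolInfos;
    if (!android::nn::setRunTimePoolInfosFromMemoryPools(&requestPoolInfos, request.pools)) {
        return -1;  // mapping the request pools failed; the real driver reports an error status
    }
    CpuExecutor executor;  // the real driver may construct this with its operation resolver
    return executor.run(model, request, modelPoolInfos, requestPoolInfos);
}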
D | SampleDriverFloatXNNPACK.cpp |
    1956  std::vector<RunTimePoolInfo> poolInfos;  in getSupportedOperationsImpl() local
    1957  setRunTimePoolInfosFromCanonicalMemories(&poolInfos, uncheckedConvert(model.pools));  in getSupportedOperationsImpl()
    1958  auto operands = initializeRunTimeInfo(model.main, poolInfos, &model.operandValues);  in getSupportedOperationsImpl()
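In the XNNPACK driver the same mapping happens already in getSupportedOperationsImpl(): line 1957 maps the model pools so that line 1958 can build a run-time operand table (dimensions, lifetimes, constant buffers) from model.main, the mapped pools, and the inline operand values, which lets the driver inspect constant operands when deciding per-operation support. A hedged sketch of how such a support check might consume that table; the output of initializeRunTimeInfo is taken to be std::vector<RunTimeOperandInfo>, and the check itself is illustrative (a real driver also filters by operation and data type).

#include <cstdint>
#include <vector>

#include "CpuExecutor.h"  // android::nn::RunTimeOperandInfo
#include "nnapi/Types.h"  // android::nn::Model, Operand, Operation

using namespace android::nn;

// Hypothetical support check: claim an operation only if the data of all of its
// constant-reference inputs was resolved through the mapped pools.
std::vector<bool> getSupportedOperationsSketch(const Model& model,
                                               const std::vector<RunTimeOperandInfo>& operands) {
    std::vector<bool> supported(model.main.operations.size(), false);
    for (size_t i = 0; i < model.main.operations.size(); ++i) {
        const Operation& operation = model.main.operations[i];
        bool inputsResolved = true;
        for (uint32_t index : operation.inputs) {
            const RunTimeOperandInfo& operand = operands[index];
            // Constant operands (weights, biases, fused activation codes, ...)
            // must have a mapped buffer before the driver can inspect them.
            if (operand.lifetime == Operand::LifeTime::CONSTANT_REFERENCE &&
                operand.buffer == nullptr) {
                inputsResolved = false;
                break;
            }
        }
        supported[i] = inputsResolved;  // a real driver also checks the operation type
    }
    return supported;
}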
/packages/modules/NeuralNetworks/common/
D | CpuExecutor.cpp |
    365  bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,  in setRunTimePoolInfosFromCanonicalMemories() argument
    367  CHECK(poolInfos != nullptr);  in setRunTimePoolInfosFromCanonicalMemories()
    368  poolInfos->clear();  in setRunTimePoolInfosFromCanonicalMemories()
    369  poolInfos->reserve(pools.size());  in setRunTimePoolInfosFromCanonicalMemories()
    372  poolInfos->push_back(*poolInfo);  in setRunTimePoolInfosFromCanonicalMemories()
    375  poolInfos->clear();  in setRunTimePoolInfosFromCanonicalMemories()
    382  bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos,  in setRunTimePoolInfosFromMemoryPools() argument
    384  CHECK(poolInfos != nullptr);  in setRunTimePoolInfosFromMemoryPools()
    385  poolInfos->clear();  in setRunTimePoolInfosFromMemoryPools()
    386  poolInfos->reserve(pools.size());  in setRunTimePoolInfosFromMemoryPools()
    [all …]
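The fragments at lines 365-375 outline the helper itself: assert the output pointer, clear and reserve the vector, try to map each pool, and on any failure clear what was accumulated and return false. Reconstructed below as a sketch; the loop body and the RunTimePoolInfo::createFromMemory factory are assumptions filled in around the visible lines (the listing is truncated at "[all …]"), so treat this as the shape of the code rather than the exact source.

#include <optional>
#include <vector>

#include <android-base/logging.h>  // CHECK, LOG

#include "CpuExecutor.h"  // android::nn::RunTimePoolInfo

namespace android::nn {

// Sketch of setRunTimePoolInfosFromCanonicalMemories() as suggested by lines 365-375.
bool setRunTimePoolInfosFromCanonicalMemoriesSketch(std::vector<RunTimePoolInfo>* poolInfos,
                                                    const std::vector<SharedMemory>& pools) {
    CHECK(poolInfos != nullptr);       // line 367
    poolInfos->clear();                // line 368
    poolInfos->reserve(pools.size());  // line 369
    for (const auto& pool : pools) {
        // Assumed factory: returns std::nullopt when the memory cannot be mapped.
        if (std::optional<RunTimePoolInfo> poolInfo = RunTimePoolInfo::createFromMemory(pool)) {
            poolInfos->push_back(*poolInfo);  // line 372
        } else {
            LOG(ERROR) << "Could not map pool";
            poolInfos->clear();               // line 375
            return false;
        }
    }
    return true;
}

}  // namespace android::nn

setRunTimePoolInfosFromMemoryPools (lines 382-386) follows the same outline for the memory pools carried by a Request rather than by a Model.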
D | LegacyHalUtils.cpp |
    315  bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,  in setRunTimePoolInfosFromHidlMemories() argument
    317  return setRunTimePoolInfosFromCanonicalMemories(poolInfos, uncheckedConvert(pools));  in setRunTimePoolInfosFromHidlMemories()
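LegacyHalUtils.cpp shows that the HIDL-facing entry point is only a thin adapter: it converts the hidl_memory pool list to canonical memories and defers to the helper above. Sketched in full below; the exact pools parameter type is an assumption, since the declaration at LegacyHalUtils.h line 191 is cut off in this listing.

#include <vector>

#include "CpuExecutor.h"     // android::nn::setRunTimePoolInfosFromCanonicalMemories
#include "LegacyHalUtils.h"  // uncheckedConvert() overloads for HIDL types

namespace android::nn {

// Thin adapter over the canonical helper, as lines 315-317 show.
bool setRunTimePoolInfosFromHidlMemoriesSketch(
        std::vector<RunTimePoolInfo>* poolInfos,
        const hardware::hidl_vec<hardware::hidl_memory>& pools) {  // assumed parameter type
    // uncheckedConvert() turns each hidl_memory into a canonical SharedMemory.
    return setRunTimePoolInfosFromCanonicalMemories(poolInfos, uncheckedConvert(pools));
}

}  // namespace android::nn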
/packages/modules/NeuralNetworks/common/include/
D | CpuExecutor.h |
    123  bool setRunTimePoolInfosFromCanonicalMemories(std::vector<RunTimePoolInfo>* poolInfos,
    126  bool setRunTimePoolInfosFromMemoryPools(std::vector<RunTimePoolInfo>* poolInfos,
D | LegacyHalUtils.h |
    191  bool setRunTimePoolInfosFromHidlMemories(std::vector<RunTimePoolInfo>* poolInfos,
/packages/modules/NeuralNetworks/runtime/
D | Manager.cpp |
    960   CpuPreparedModel(Model model, std::vector<RunTimePoolInfo> poolInfos)  in CpuPreparedModel() argument
    961   : mModel(std::move(model)), mModelPoolInfos(std::move(poolInfos)) {}  in CpuPreparedModel()
    1064  std::vector<RunTimePoolInfo> poolInfos;  in create() local
    1065  if (!setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) {  in create()
    1070  std::make_shared<CpuPreparedModel>(std::move(model), std::move(poolInfos));  in create()
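Manager.cpp repeats the pattern inside the runtime's CPU fallback: CpuPreparedModel::create() (lines 1064-1070) maps the model pools once and stores them next to the model itself (lines 960-961), so later executions reuse the mapped pools instead of remapping per request. A reduced sketch of that factory; the real class implements the runtime's prepared-model interface, and since its failure branch at line 1065 is not shown above, a null return stands in for it here.

#include <memory>
#include <utility>
#include <vector>

#include "CpuExecutor.h"  // android::nn::RunTimePoolInfo, setRunTimePoolInfosFromCanonicalMemories
#include "nnapi/Types.h"  // android::nn::Model

// Hypothetical reduction of CpuPreparedModel from Manager.cpp.
class CpuPreparedModelSketch {
  public:
    static std::shared_ptr<CpuPreparedModelSketch> create(android::nn::Model model) {
        std::vector<android::nn::RunTimePoolInfo> poolInfos;
        if (!android::nn::setRunTimePoolInfosFromCanonicalMemories(&poolInfos, model.pools)) {
            return nullptr;  // stand-in for the real error handling
        }
        return std::make_shared<CpuPreparedModelSketch>(std::move(model), std::move(poolInfos));
    }

    CpuPreparedModelSketch(android::nn::Model model,
                           std::vector<android::nn::RunTimePoolInfo> poolInfos)
        : mModel(std::move(model)), mModelPoolInfos(std::move(poolInfos)) {}

  private:
    const android::nn::Model mModel;
    const std::vector<android::nn::RunTimePoolInfo> mModelPoolInfos;
};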