
Lines Matching refs:std

66     static std::shared_ptr<DriverDevice> create(SharedDevice device, bool isUpdatable = false);
71 const std::string& getName() const override { return kInterface->getName(); } in getName()
72 const std::string& getVersionString() const override { return kInterface->getVersionString(); } in getVersionString()
76 const std::vector<Extension>& getSupportedExtensions() const override { in getSupportedExtensions()
79 std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override;
96 std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override { in getNumberOfCacheFilesNeeded()
113 std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
116 const std::optional<CacheToken>& maybeToken) const override;
118 std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
125 GeneralResult<std::vector<bool>> getSupportedOperationsImpl(const MetaModel& metaModel) const;
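
The DriverDevice lines above (66-125) show a factory-plus-interface shape: construction goes through a static create() that returns a std::shared_ptr, and capability queries are const overrides. A minimal sketch of that shape, with all names invented for illustration:

    #include <memory>
    #include <string>
    #include <utility>

    class Device {
       public:
        virtual ~Device() = default;
        virtual const std::string& getName() const = 0;
    };

    class ExampleDevice : public Device {
       public:
        // Mirrors the create()-then-shared_ptr pattern: validate, then wrap.
        static std::shared_ptr<ExampleDevice> create(std::string name) {
            if (name.empty()) return nullptr;  // reject bad input up front
            return std::make_shared<ExampleDevice>(std::move(name));
        }
        explicit ExampleDevice(std::string name) : kName(std::move(name)) {}
        const std::string& getName() const override { return kName; }

       private:
        const std::string kName;
    };
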
150 std::tuple<int, std::vector<OutputShape>, Timing> execute(
151 const std::vector<ModelArgumentInfo>& inputs,
152 const std::vector<ModelArgumentInfo>& outputs,
153 const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController,
157 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> executeFenced(
158 const std::vector<ModelArgumentInfo>& inputs,
159 const std::vector<ModelArgumentInfo>& outputs,
160 const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
165 std::pair<int, std::shared_ptr<RuntimeExecution>> createReusableExecution(
166 const std::vector<ModelArgumentInfo>& inputs,
167 const std::vector<ModelArgumentInfo>& outputs,
168 const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure,
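
execute(), executeFenced(), and createReusableExecution() (lines 150-168) all report errors by packing a status code into the returned std::pair or std::tuple rather than throwing. A hedged sketch of producing and consuming that shape; the status constant and payload types are stand-ins, not the file's types:

    #include <tuple>
    #include <vector>

    constexpr int kNoError = 0;  // stand-in for ANEURALNETWORKS_NO_ERROR

    // Hypothetical producer with the same (status, payload, timing) shape.
    std::tuple<int, std::vector<int>, double> run() {
        return {kNoError, std::vector<int>{1, 2, 3}, 0.5};
    }

    int consume() {
        auto [status, shapes, timing] = run();  // structured bindings unpack it
        if (status != kNoError) return status;
        return static_cast<int>(shapes.size()) + static_cast<int>(timing);
    }
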
193 std::vector<const RuntimeMemory*> memories, MeasureTiming measure, in DriverExecution()
195 : kExecution(std::move(execution)), in DriverExecution()
196 kRequest(std::move(request)), in DriverExecution()
197 kMemories(std::move(memories)), in DriverExecution()
199 kLoopTimeoutDuration(std::move(loopTimeoutDuration)), in DriverExecution()
204 std::tuple<int, std::vector<OutputShape>, Timing> compute(
207 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> computeFenced(
208 const std::vector<int>& waitFor, const OptionalTimePoint& deadline,
216 const std::vector<const RuntimeMemory*> kMemories;
219 mutable std::map<const IBurst*, SharedExecution> mCachedBurstExecutions;
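
The DriverExecution constructor (lines 193-199) moves its arguments into const k-prefixed members, while line 219 declares a mutable map so that const compute paths can still cache burst executions. A small sketch of both idioms under assumed, simplified types:

    #include <map>
    #include <string>
    #include <utility>

    class Execution {
       public:
        // Move the argument into a const member, fixed for the object's lifetime.
        explicit Execution(std::string request) : kRequest(std::move(request)) {}

        int compute(const std::string& key) const {
            auto [it, inserted] = mCache.try_emplace(key, 0);
            if (inserted) it->second = static_cast<int>(kRequest.size());  // miss: fill once
            return it->second;
        }

       private:
        const std::string kRequest;                 // immutable after construction
        mutable std::map<std::string, int> mCache;  // mutated from const methods
    };
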
226 : kInterface(std::move(device)), kIsUpdatable(isUpdatable) { in DriverDevice()
236 std::shared_ptr<DriverDevice> DriverDevice::create(SharedDevice device, bool isUpdatable) { in create()
242 return std::make_shared<DriverDevice>(std::move(device), isUpdatable); in create()
265 GeneralResult<std::vector<bool>> DriverDevice::getSupportedOperationsImpl( in getSupportedOperationsImpl()
274 const std::vector<bool> supported = NN_TRY(kInterface->getSupportedOperations(sliceModel)); in getSupportedOperationsImpl()
283 std::vector<bool> remappedSupported(operationCount, false); in getSupportedOperationsImpl()
292 std::vector<bool> DriverDevice::getSupportedOperations(const MetaModel& metaModel) const { in getSupportedOperations()
300 return std::vector<bool>(model.main.operations.size(), false); in getSupportedOperations()
303 std::vector<bool>& supportedOperations = result.value(); in getSupportedOperations()
309 const uint32_t baseAccumulator = std::hash<std::string>{}(getName()); in getSupportedOperations()
318 auto accumulateOperands = [&model, &accumulator](const std::vector<uint32_t>& operands) { in getSupportedOperations()
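
Lines 309-318 suggest a token built by seeding an accumulator with std::hash of the device name and then folding in each operation's operand indexes. The sketch below assumes a simple polynomial mix; the file's actual combining step is not visible in this listing:

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <vector>

    uint32_t cacheToken(const std::string& deviceName,
                        const std::vector<uint32_t>& operands) {
        // Seed from the device name, as on line 309.
        uint32_t accumulator =
                static_cast<uint32_t>(std::hash<std::string>{}(deviceName));
        for (uint32_t operand : operands) {
            accumulator = accumulator * 31 + operand;  // unsigned wraparound is defined
        }
        return accumulator;
    }
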
345 static GeneralResult<SharedHandle> createCacheHandle(const std::string& filename, in createCacheHandle()
354 std::vector<base::unique_fd> fds; in createCacheHandle()
355 fds.push_back(std::move(fd)); in createCacheHandle()
356 return std::make_shared<const Handle>(Handle{ in createCacheHandle()
357 .fds = std::move(fds), in createCacheHandle()
364 static GeneralResult<std::vector<SharedHandle>> createCacheHandleVec( in createCacheHandleVec()
365 uint32_t numCacheFiles, const std::string& baseFilename, bool createIfNotExist) { in createCacheHandleVec()
367 std::vector<SharedHandle> handles; in createCacheHandleVec()
370 std::string filename = baseFilename + std::to_string(i); in createCacheHandleVec()
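
createCacheHandleVec() (lines 364-370) derives one filename per cache file by appending the index with std::to_string. A sketch of just the naming loop, leaving out the fd opening and SharedHandle wrapping the real function also does:

    #include <cstdint>
    #include <string>
    #include <vector>

    std::vector<std::string> cacheFilenames(uint32_t numCacheFiles,
                                            const std::string& baseFilename) {
        std::vector<std::string> filenames;
        filenames.reserve(numCacheFiles);
        for (uint32_t i = 0; i < numCacheFiles; ++i) {
            filenames.push_back(baseFilename + std::to_string(i));  // line 370's scheme
        }
        return filenames;
    }
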
381 const std::pair<uint32_t, uint32_t>& numCacheFiles, bool createIfNotExist) { in getCacheHandles()
382 if (const auto* cacheHandles = std::get_if<CacheHandles>(&cacheInfo.variant)) { in getCacheHandles()
398 std::string filename(kByteSizeOfCacheToken * 2 + 1, '0'); in getCacheHandles()
404 const auto& cacheDir = std::get<CacheDir>(cacheInfo.variant); in getCacheHandles()
406 std::string cacheFileName = cacheDir + filename; in getCacheHandles()
410 std::vector<SharedHandle> modelCache = in getCacheHandles()
414 std::vector<SharedHandle> dataCache = in getCacheHandles()
418 .modelCache = std::move(modelCache), in getCacheHandles()
419 .dataCache = std::move(dataCache), in getCacheHandles()
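
getCacheHandles() (lines 381-419) dispatches on a std::variant: std::get_if probes for pre-opened CacheHandles, and the fallback branch reads the CacheDir string with std::get. A self-contained sketch of that two-branch dispatch, with a stand-in payload type:

    #include <string>
    #include <variant>
    #include <vector>

    using CacheDir = std::string;
    using CacheHandles = std::vector<int>;  // stand-in payload for illustration

    // std::get_if returns nullptr on the wrong alternative, so the
    // fallthrough std::get<CacheDir> cannot throw here.
    std::string describeCacheInfo(const std::variant<CacheDir, CacheHandles>& info) {
        if (const auto* handles = std::get_if<CacheHandles>(&info)) {
            return "handles: " + std::to_string(handles->size());
        }
        const auto& dir = std::get<CacheDir>(info);
        return "dir: " + dir;
    }
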
432 std::pair<int, std::shared_ptr<RuntimePreparedModel>> DriverDevice::prepareModel( in prepareModel()
435 const std::optional<CacheToken>& maybeToken) const { in prepareModel()
441 std::make_shared<DriverPreparedModel>(this, std::move(result).value())}; in prepareModel()
455 cache = std::move(result).value(); in prepareModel()
475 SharedPreparedModel preparedModel = std::move(result).value(); in prepareModel()
479 std::make_shared<DriverPreparedModel>(this, std::move(preparedModel))}; in prepareModel()
482 std::pair<int, std::unique_ptr<RuntimeMemory>> DriverDevice::allocate(const MemoryDescriptor& desc, in allocate()
485 std::vector<SharedPreparedModel> preparedModels(desc.preparedModels.size()); in allocate()
486 std::transform(desc.preparedModels.begin(), desc.preparedModels.end(), preparedModels.begin(), in allocate()
499 return MemoryFromDevice::create(std::move(result).value()); in allocate()
502 static Request createDriverRequest(const std::vector<ModelArgumentInfo>& inputs, in createDriverRequest()
503 const std::vector<ModelArgumentInfo>& outputs, in createDriverRequest()
504 const std::vector<const RuntimeMemory*>& memories) { in createDriverRequest()
507 std::transform(inputs.begin(), inputs.end(), std::back_inserter(request.inputs), in createDriverRequest()
510 std::transform(outputs.begin(), outputs.end(), std::back_inserter(request.outputs), in createDriverRequest()
513 std::transform(memories.begin(), memories.end(), std::back_inserter(request.pools), in createDriverRequest()
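
createDriverRequest() (lines 502-513) converts the argument-info and memory lists into request fields with std::transform plus std::back_inserter. The same idiom in miniature, with invented element types:

    #include <algorithm>
    #include <iterator>
    #include <string>
    #include <vector>

    // Appends one converted element per input, as the real function does
    // for request.inputs, request.outputs, and request.pools.
    std::vector<std::string> toRequestArguments(const std::vector<int>& infos) {
        std::vector<std::string> arguments;
        arguments.reserve(infos.size());  // avoid reallocation during transform
        std::transform(infos.begin(), infos.end(), std::back_inserter(arguments),
                       [](int info) { return std::to_string(info); });
        return arguments;
    }
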
524 std::tuple<int, std::vector<OutputShape>, Timing> DriverPreparedModel::execute( in execute()
525 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in execute()
526 const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController, in execute()
535 ExecutionResult<std::pair<std::vector<OutputShape>, Timing>> result; in execute()
542 if (const auto* maybeMemory = std::get_if<SharedMemory>(&pool)) { in execute()
556 std::vector<OutputShape> outputShapes; in execute()
561 std::tie(outputShapes, timing) = std::move(result).value(); in execute()
563 auto [message, code, returnedOutputShapes] = std::move(result).error(); in execute()
569 outputShapes = std::move(returnedOutputShapes); in execute()
571 return {n, std::move(outputShapes), timing}; in execute()
575 return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing}; in execute()
578 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> DriverPreparedModel::executeFenced( in executeFenced()
579 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in executeFenced()
580 const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, in executeFenced()
585 CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd >= 0; })); in executeFenced()
591 std::vector<SyncFence> waitForHandles; in executeFenced()
613 std::tie(syncFence, executeFencedInfoCallback) = std::move(result).value(); in executeFenced()
633 std::tie(std::ignore, timing) = result.value(); in executeFenced()
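
Line 585 (and line 727 below) guards the fenced execution path with a CHECK that every wait-fence fd is non-negative before it is wrapped in a SyncFence. A portable sketch using assert(), which unlike CHECK() compiles out under NDEBUG:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    void validateWaitFences(const std::vector<int>& waitFor) {
        assert(std::all_of(waitFor.begin(), waitFor.end(),
                           [](int fd) { return fd >= 0; }));
        (void)waitFor;  // silence the unused-parameter warning in NDEBUG builds
    }
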
649 std::pair<int, std::shared_ptr<RuntimeExecution>> DriverPreparedModel::createReusableExecution( in createReusableExecution()
650 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in createReusableExecution()
651 const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure, in createReusableExecution()
662 auto execution = std::make_shared<DriverExecution>( in createReusableExecution()
663 std::move(result).value(), std::move(request), memories, measure, loopTimeoutDuration, in createReusableExecution()
665 return {ANEURALNETWORKS_NO_ERROR, std::move(execution)}; in createReusableExecution()
668 std::tuple<int, std::vector<OutputShape>, Timing> DriverExecution::compute( in compute()
681 if (const auto* maybeMemory = std::get_if<SharedMemory>(&pool)) { in compute()
694 execution = std::move(createResult).value(); in compute()
707 auto [message, code, returnedOutputShapes] = std::move(result).error(); in compute()
713 return {n, std::move(returnedOutputShapes), {}}; in compute()
719 auto [outputShapes, timing] = std::move(result).value(); in compute()
720 return {ANEURALNETWORKS_NO_ERROR, std::move(outputShapes), timing}; in compute()
723 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> DriverExecution::computeFenced( in computeFenced()
724 const std::vector<int>& waitFor, const OptionalTimePoint& deadline, in computeFenced()
727 CHECK(std::all_of(waitFor.begin(), waitFor.end(), [](int fd) { return fd >= 0; })); in computeFenced()
729 std::vector<SyncFence> waitForHandles; in computeFenced()
751 std::tie(syncFence, executeFencedInfoCallback) = std::move(result).value(); in computeFenced()
771 std::tie(std::ignore, timing) = result.value(); in computeFenced()
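
Lines 751 and 771 unpack pair results with std::tie, using std::ignore when only the timing member is wanted. A minimal demonstration:

    #include <tuple>
    #include <utility>

    void unpackResult() {
        int fence = -1;
        double timing = 0.0;
        std::pair<int, double> result{3, 1.5};
        std::tie(fence, timing) = result;        // assign both members (as on line 751)
        std::tie(std::ignore, timing) = result;  // discard the first (as on line 771)
        (void)fence;
    }
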
807 std::vector<Capabilities::OperandPerformance> operandPerformance; in createCpuCapabilities()
808 operandPerformance.reserve(std::size(operandTypes)); in createCpuCapabilities()
809 std::transform(std::begin(operandTypes), std::end(operandTypes), in createCpuCapabilities()
810 std::back_inserter(operandPerformance), [kPerf](OperandType type) { in createCpuCapabilities()
815 Capabilities::OperandPerformanceTable::create(std::move(operandPerformance)).value(); in createCpuCapabilities()
820 .operandPerformance = std::move(table), in createCpuCapabilities()
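
createCpuCapabilities() (lines 807-815) walks a fixed array of operand types with std::begin/std::end/std::size, reserves first, and emits one uniform performance entry per type via std::transform. A simplified sketch; the pair below stands in for the real Capabilities::OperandPerformance entries:

    #include <algorithm>
    #include <iterator>
    #include <utility>
    #include <vector>

    std::vector<std::pair<int, float>> uniformPerformance(float perf) {
        static constexpr int operandTypes[] = {0, 1, 2, 3};  // illustrative ids
        std::vector<std::pair<int, float>> table;
        table.reserve(std::size(operandTypes));  // line 808's reserve-first idiom
        std::transform(std::begin(operandTypes), std::end(operandTypes),
                       std::back_inserter(table),
                       [perf](int type) { return std::make_pair(type, perf); });
        return table;
    }
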
831 static std::shared_ptr<CpuDevice> get() { in get()
832 static std::shared_ptr<CpuDevice> instance(new CpuDevice); in get()
836 const std::string& getName() const override { return kName; } in getName()
837 const std::string& getVersionString() const override { return kVersionString; } in getVersionString()
841 const std::vector<Extension>& getSupportedExtensions() const override { in getSupportedExtensions()
844 std::vector<bool> getSupportedOperations(const MetaModel& metaModel) const override;
857 std::pair<uint32_t, uint32_t> getNumberOfCacheFilesNeeded() const override { in getNumberOfCacheFilesNeeded()
863 std::pair<int, std::shared_ptr<RuntimePreparedModel>> prepareModel(
866 const std::optional<CacheToken>& maybeToken) const override;
868 std::pair<int, std::unique_ptr<RuntimeMemory>> allocate(const MemoryDescriptor& desc,
874 const std::string kName = "nnapi-reference";
876 const std::string kVersionString = build::GetBuildNumber();
878 const std::string kVersionString = "UNKNOWN";
884 const std::vector<Extension> kSupportedExtensions{/* No extensions. */};
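
CpuDevice::get() (lines 831-832) is a function-local-static singleton: the std::shared_ptr is constructed on first call, with thread-safe initialization guaranteed since C++11. The same pattern in isolation:

    #include <memory>

    class ExampleSingleton {
       public:
        static std::shared_ptr<ExampleSingleton> get() {
            static std::shared_ptr<ExampleSingleton> instance(new ExampleSingleton);
            return instance;  // every caller shares the one instance
        }

       private:
        ExampleSingleton() = default;  // private ctor: get() is the only way in
    };

Note the raw new rather than std::make_shared: make_shared cannot reach a private constructor, which is presumably why line 832 does the same.
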
893 static std::pair<int, std::shared_ptr<RuntimePreparedModel>> create(Model model);
898 std::tuple<int, std::vector<OutputShape>, Timing> execute(
899 const std::vector<ModelArgumentInfo>& inputs,
900 const std::vector<ModelArgumentInfo>& outputs,
901 const std::vector<const RuntimeMemory*>& memories, const SharedBurst& burstController,
907 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> executeFenced(
908 const std::vector<ModelArgumentInfo>& inputs,
909 const std::vector<ModelArgumentInfo>& outputs,
910 const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor,
915 std::pair<int, std::shared_ptr<RuntimeExecution>> createReusableExecution(
916 const std::vector<ModelArgumentInfo>& inputs,
917 const std::vector<ModelArgumentInfo>& outputs,
918 const std::vector<const RuntimeMemory*>& memories, MeasureTiming measure,
926 CpuPreparedModel(Model model, std::vector<RunTimePoolInfo> poolInfos) in CpuPreparedModel()
927 : mModel(std::move(model)), mModelPoolInfos(std::move(poolInfos)) {} in CpuPreparedModel()
930 const std::vector<RunTimePoolInfo>& getModelPoolInfos() const { return mModelPoolInfos; } in getModelPoolInfos()
938 const std::vector<RunTimePoolInfo> mModelPoolInfos;
944 std::vector<RunTimePoolInfo> requestPoolInfos, in CpuExecution()
947 kRequest(std::move(request)), in CpuExecution()
948 kRequestPoolInfos(std::move(requestPoolInfos)), in CpuExecution()
949 kLoopTimeoutDuration(std::move(loopTimeoutDuration)) {} in CpuExecution()
951 std::tuple<int, std::vector<OutputShape>, Timing> compute(
954 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> computeFenced(
955 const std::vector<int>& waitFor, const OptionalTimePoint& deadline,
961 std::vector<RunTimePoolInfo> kRequestPoolInfos;
965 std::vector<bool> CpuDevice::getSupportedOperations(const MetaModel& metaModel) const { in getSupportedOperations()
968 std::vector<bool> result(count, false); in getSupportedOperations()
979 std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuDevice::prepareModel( in prepareModel()
982 const std::optional<CacheToken>& maybeToken) const { in prepareModel()
1006 std::pair<int, std::unique_ptr<RuntimeMemory>> CpuDevice::allocate(const MemoryDescriptor& desc, in allocate()
1016 std::pair<int, std::shared_ptr<RuntimePreparedModel>> CpuPreparedModel::create(Model model) { in create()
1017 std::vector<RunTimePoolInfo> poolInfos; in create()
1022 std::shared_ptr<RuntimePreparedModel> preparedModel = in create()
1023 std::make_shared<CpuPreparedModel>(std::move(model), std::move(poolInfos)); in create()
1024 return {ANEURALNETWORKS_NO_ERROR, std::move(preparedModel)}; in create()
1027 static std::tuple<int, std::vector<OutputShape>, Timing> computeOnCpu( in computeOnCpu()
1029 const std::vector<RunTimePoolInfo>& modelPoolInfos, in computeOnCpu()
1030 const std::vector<RunTimePoolInfo>& requestPoolInfos, const OptionalTimePoint& deadline, in computeOnCpu()
1045 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> CpuPreparedModel::executeFenced( in executeFenced()
1046 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in executeFenced()
1047 const std::vector<const RuntimeMemory*>& memories, const std::vector<int>& waitFor, in executeFenced()
1076 static std::tuple<int, Request, std::vector<RunTimePoolInfo>> createCpuRequest( in createCpuRequest()
1077 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in createCpuRequest()
1078 const std::vector<const RuntimeMemory*>& memories) { in createCpuRequest()
1079 std::vector<RunTimePoolInfo> requestPoolInfos; in createCpuRequest()
1082 if (std::optional<RunTimePoolInfo> poolInfo = mem->getRunTimePoolInfo()) { in createCpuRequest()
1090 [&requestPoolInfos](const std::vector<ModelArgumentInfo>& argumentInfos) { in createCpuRequest()
1091 std::vector<DataLocation> ptrArgsLocations; in createCpuRequest()
1105 const std::vector<DataLocation> inputPtrArgsLocations = fixPointerArguments(inputs); in createCpuRequest()
1106 const std::vector<DataLocation> outputPtrArgsLocations = fixPointerArguments(outputs); in createCpuRequest()
1111 return {ANEURALNETWORKS_NO_ERROR, std::move(request), std::move(requestPoolInfos)}; in createCpuRequest()
1121 std::tuple<int, std::vector<OutputShape>, Timing> CpuPreparedModel::execute( in execute()
1122 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in execute()
1123 const std::vector<const RuntimeMemory*>& memories, const SharedBurst& /*burstController*/, in execute()
1132 std::vector<RunTimePoolInfo> requestPoolInfos; in execute()
1133 std::tie(nCreateRequest, request, requestPoolInfos) = in execute()
1143 std::tuple<int, std::vector<OutputShape>, Timing> result = {}; in execute()
1144 std::thread([this, &request, &requestPoolInfos, &deadline, &loopTimeoutDuration, &result] { in execute()
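
Line 1144 runs the CPU computation on a freshly spawned std::thread that captures locals by reference and is joined before execute() returns, so the extra thread adds no concurrency; a plausible motive is giving the compute call its own stack, though the listing does not show the rationale. A sketch of the join-immediately pattern, with invented payload types:

    #include <thread>
    #include <tuple>
    #include <vector>

    std::tuple<int, std::vector<int>> computeOnOwnThread() {
        std::tuple<int, std::vector<int>> result = {};
        // Capture result by reference; the join below keeps the reference valid.
        std::thread worker([&result] {
            result = std::make_tuple(0, std::vector<int>{1, 2, 3});
        });
        worker.join();  // must join before result goes out of scope
        return result;
    }
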
1155 std::pair<int, std::shared_ptr<RuntimeExecution>> CpuPreparedModel::createReusableExecution( in createReusableExecution()
1156 const std::vector<ModelArgumentInfo>& inputs, const std::vector<ModelArgumentInfo>& outputs, in createReusableExecution()
1157 const std::vector<const RuntimeMemory*>& memories, MeasureTiming /*measure*/, in createReusableExecution()
1163 auto execution = std::make_shared<CpuExecution>( in createReusableExecution()
1164 *this, std::move(request), std::move(requestPoolInfos), loopTimeoutDuration); in createReusableExecution()
1165 return {ANEURALNETWORKS_NO_ERROR, std::move(execution)}; in createReusableExecution()
1168 std::tuple<int, std::vector<OutputShape>, Timing> CpuExecution::compute( in compute()
1178 std::tuple<int, std::vector<OutputShape>, Timing> result = {}; in compute()
1179 std::thread([this, &deadline, &result] { in compute()
1191 std::tuple<int, int, ExecuteFencedInfoCallback, Timing> CpuExecution::computeFenced( in computeFenced()
1192 const std::vector<int>& waitFor, const OptionalTimePoint& deadline, in computeFenced()
1224 std::shared_ptr<Device> DeviceManager::getCpuDevice() { in getCpuDevice()
1228 std::shared_ptr<Device> DeviceManager::forTest_makeDriverDevice(const SharedDevice& device) { in forTest_makeDriverDevice()
1236 std::vector<std::shared_ptr<DriverDevice>> getDriverDevices() { in getDriverDevices()
1245 std::vector<std::shared_ptr<DriverDevice>> driverDevices; in getDriverDevices()
1248 driverDevices.push_back(DriverDevice::create(std::move(device), isDeviceUpdatable)); in getDriverDevices()
1253 std::vector<std::shared_ptr<DriverDevice>> getDriverDevices() { in getDriverDevices()
1255 std::vector<std::shared_ptr<DriverDevice>> driverDevices; in getDriverDevices()
1258 driverDevices.push_back(DriverDevice::create(std::move(device))); in getDriverDevices()
1271 mDevices.push_back(std::move(driverDevice)); in findAvailableDevices()
1283 mDevices.push_back(std::move(driverDevice)); in registerDevice()