Searched refs:inputTensors (Results 1 – 25 of 89) sorted by relevance

/external/pytorch/torch/csrc/distributed/c10d/
ProcessGroupMPI.cpp
124 const std::optional<std::vector<at::Tensor>>& inputTensors) in AsyncWork() argument
125 : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors), in AsyncWork()
382 const std::optional<std::vector<at::Tensor>>& inputTensors) { in enqueue() argument
384 c10::make_intrusive<WorkMPI>(entry->dst, profilingTitle, inputTensors); in enqueue()
481 std::vector<at::Tensor>& inputTensors, in allgather() argument
483 checkSingleTensor(inputTensors); in allgather()
497 checkSameSizeAndType(inputTensors[0], outputTensors[0]); in allgather()
521 &inputTensors, &outputTensors[0], std::move(runFunc)); in allgather()
525 std::optional<std::vector<at::Tensor>>(inputTensors)); in allgather()
537 std::vector<at::Tensor>& inputTensors, in gather() argument
[all …]
ProcessGroupUCC.cpp
771 std::vector<at::Tensor>& inputTensors, in collective_post() argument
777 opType, seq_, prof_title, inputTensors, logger); in collective_post()
789 inputTensors, in collective_post()
857 std::vector<at::Tensor>& inputTensors, in allgather() argument
859 auto& tensor = inputTensors[0]; in allgather()
884 SAVE_TENSORS(inputTensors, data->src); in allgather()
894 inputTensors, in allgather()
928 auto inumel = inputTensors[i].numel(); in allgather()
951 inputTensors, in allgather()
980 std::vector<at::Tensor> inputTensors = {inputTensor}; in _allgather_base() local
[all …]
ProcessGroupMPI.hpp
89 const std::optional<std::vector<at::Tensor>>& inputTensors = in WorkMPI() argument
91 : Work(-1, OpType::UNKNOWN, profilingTitle, inputTensors), in WorkMPI()
117 const std::optional<std::vector<at::Tensor>>& inputTensors =
174 std::vector<at::Tensor>& inputTensors,
184 std::vector<at::Tensor>& inputTensors,
189 std::vector<at::Tensor>& inputTensors,
194 std::vector<std::vector<at::Tensor>>& inputTensors,
199 std::vector<std::vector<at::Tensor>>& inputTensors,
211 std::vector<at::Tensor>& inputTensors,
246 const std::optional<std::vector<at::Tensor>>& inputTensors =
PyProcessGroup.hpp
71 std::vector<at::Tensor>& inputTensors, in allgather() argument
78 inputTensors, in allgather()
84 std::vector<at::Tensor>& inputTensors, in allgather_into_tensor_coalesced() argument
91 inputTensors, in allgather_into_tensor_coalesced()
157 std::vector<std::vector<at::Tensor>>& inputTensors, in reduce_scatter() argument
164 inputTensors, in reduce_scatter()
170 std::vector<at::Tensor>& inputTensors, in reduce_scatter_tensor_coalesced() argument
177 inputTensors, in reduce_scatter_tensor_coalesced()
ProcessGroupWrapper.cpp
433 std::vector<at::Tensor>& inputTensors, in allgather() argument
436 runCollectiveChecks(OpType::ALLGATHER, inputTensors); in allgather()
440 return backend_->allgather(outputTensors, inputTensors, opts); in allgather()
447 std::vector<at::Tensor> inputTensors({inputBuffer}); in _allgather_base() local
448 runCollectiveChecks(OpType::_ALLGATHER_BASE, inputTensors); in _allgather_base()
454 std::vector<at::Tensor>& inputTensors, in allgather_coalesced() argument
461 return backend_->allgather_coalesced(outputTensorLists, inputTensors, opts); in allgather_coalesced()
466 std::vector<at::Tensor>& inputTensors, in gather() argument
468 runCollectiveChecks(OpType::GATHER, inputTensors); in gather()
469 return backend_->gather(outputTensors, inputTensors, opts); in gather()
[all …]
ProcessGroupWrapper.hpp
38 std::vector<at::Tensor>& inputTensors,
52 std::vector<at::Tensor>& inputTensors,
57 std::vector<at::Tensor>& inputTensors,
62 std::vector<std::vector<at::Tensor>>& inputTensors,
67 std::vector<std::vector<at::Tensor>>& inputTensors,
79 std::vector<at::Tensor>& inputTensors,
ProcessGroup.hpp
217 std::vector<at::Tensor>& inputTensors, in allgather() argument
231 inputTensors, in allgather()
268 std::vector<at::Tensor>& inputTensors, in allgather_coalesced() argument
280 inputTensors, in allgather_coalesced()
289 std::vector<at::Tensor>& inputTensors, in allgather_into_tensor_coalesced() argument
301 inputTensors, in allgather_into_tensor_coalesced()
307 std::vector<at::Tensor>& inputTensors, in gather() argument
319 inputTensors, in gather()
327 std::vector<std::vector<at::Tensor>>& inputTensors, in scatter() argument
342 inputTensors, in scatter()
[all …]
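
Note: the c10d hits above all share one argument layout: a flat vector of input tensors and, for allgather-style calls, one output list per input holding every rank's contribution. A minimal sketch of that layout follows, assuming an already-initialized process group `pg` and a known `worldSize` (neither appears in these results); tensor shapes are placeholders.

    // Hedged sketch of the allgather(outputTensors, inputTensors, opts) call shape seen
    // in the ProcessGroupMPI/UCC/Wrapper hits above. `pg` and `worldSize` are assumed to
    // come from an already-initialized process group; shapes are illustrative only.
    #include <ATen/ATen.h>
    #include <torch/csrc/distributed/c10d/ProcessGroup.hpp>

    c10::intrusive_ptr<c10d::Work> AllgatherOnce(c10d::ProcessGroup& pg, int64_t worldSize) {
      // One input tensor contributed by this rank...
      std::vector<at::Tensor> inputTensors = {at::ones({4})};
      // ...and, per input, one output slot for every rank's contribution.
      std::vector<std::vector<at::Tensor>> outputTensors(inputTensors.size());
      for (int64_t r = 0; r < worldSize; ++r) {
        outputTensors[0].push_back(at::empty({4}));
      }
      auto work = pg.allgather(outputTensors, inputTensors);
      work->wait();  // block until the collective completes on this rank
      return work;
    }
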
/external/armnn/src/backends/backendsCommon/test/
StridedSliceAsyncEndToEndTest.hpp
57 InputTensors inputTensors; in AsyncThreadedEndToEndTestImpl() local
61 inputTensors.reserve(inputTensorData.size()); in AsyncThreadedEndToEndTestImpl()
66 inputTensors.push_back({it.first, in AsyncThreadedEndToEndTestImpl()
80 inputTensorsVec.push_back(inputTensors); in AsyncThreadedEndToEndTestImpl()
90 InputTensors& inputTensors = inputTensorsVec[i]; in AsyncThreadedEndToEndTestImpl() local
97 runtime->Execute(workingMemHandle, inputTensors, outputTensors); in AsyncThreadedEndToEndTestImpl()
149 InputTensors inputTensors; in AsyncEndToEndTestImpl() local
150 inputTensors.reserve(inputTensorData.size()); in AsyncEndToEndTestImpl()
155 inputTensors.push_back({it.first, in AsyncEndToEndTestImpl()
189 runtime->Execute(workingMemHandleRef, inputTensors, outputTensorsVec[0]); in AsyncEndToEndTestImpl()
[all …]
EndToEndTestImpl.hpp
68 InputTensors inputTensors in ConstantUsageTest() local
78 runtime->EnqueueWorkload(netId, inputTensors, outputTensors); in ConstantUsageTest()
152 InputTensors inputTensors; in EndToEndLayerTestImpl() local
153 inputTensors.reserve(inputTensorData.size()); in EndToEndLayerTestImpl()
156 inputTensors.push_back({it.first, in EndToEndLayerTestImpl()
172 runtime->EnqueueWorkload(netId, inputTensors, outputTensors); in EndToEndLayerTestImpl()
240 InputTensors inputTensors in ImportNonAlignedInputPointerTest() local
252 …CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportExceptio… in ImportNonAlignedInputPointerTest()
309 InputTensors inputTensors in ExportNonAlignedOutputPointerTest() local
322 …CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportExceptio… in ExportNonAlignedOutputPointerTest()
[all …]
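
Note: the Arm NN end-to-end test hits above follow one pattern: build `armnn::InputTensors` / `armnn::OutputTensors` as lists of {binding id, tensor} pairs, then hand both to `EnqueueWorkload`. A hedged sketch of that pattern follows; binding id 0 and the surrounding runtime/network/TensorInfo objects are placeholder assumptions, not taken from these results.

    // Hedged sketch of the InputTensors/OutputTensors pattern used by the tests above.
    // runtime, netId and the TensorInfos are assumed to be created elsewhere; recent
    // Arm NN releases also expect inputInfo.SetConstant(true) before it is wrapped in a
    // ConstTensor.
    #include <armnn/ArmNN.hpp>
    #include <vector>

    armnn::Status RunOnce(armnn::IRuntime& runtime,
                          armnn::NetworkId netId,
                          const armnn::TensorInfo& inputInfo,
                          const armnn::TensorInfo& outputInfo,
                          const std::vector<float>& inputData,
                          std::vector<float>& outputData)
    {
        // InputTensors is a vector of {LayerBindingId, ConstTensor} pairs, matching the
        // inputTensors.push_back({it.first, ...}) lines in the hits.
        armnn::InputTensors inputTensors
        {
            { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
        };
        armnn::OutputTensors outputTensors
        {
            { 0, armnn::Tensor(outputInfo, outputData.data()) }
        };
        return runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
    }
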
/external/armnn/src/armnnTfLiteParser/test/
FullyConnected.cpp
23 std::string inputTensors = "[ 0, 2 ]"; in FullyConnectedFixture() local
28 inputTensors = "[ 0, 2, 3 ]"; in FullyConnectedFixture()
93 "inputs": )" + inputTensors + R"(, in FullyConnectedFixture()
204 std::string inputTensors = "[ 0, 1 ]"; in FullyConnectedNonConstWeightsFixture() local
210 inputTensors = "[ 0, 1, 2 ]"; in FullyConnectedNonConstWeightsFixture()
283 "inputs": )" + inputTensors + R"(, in FullyConnectedNonConstWeightsFixture()
288 "inputs": )" + inputTensors + R"(, in FullyConnectedNonConstWeightsFixture()
ParserFlatbuffersFixture.hpp
285 void FillInputTensors(armnn::InputTensors& inputTensors,
293 armnn::InputTensors& inputTensors, in FillInputTensors() argument
302 inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data(… in FillInputTensors()
347 armnn::InputTensors inputTensors; in RunTest() local
348 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId); in RunTest()
372 m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors); in RunTest()
390 m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors); in RunTest()
418 armnn::InputTensors inputTensors; in RunTest() local
419 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId); in RunTest()
436 m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors); in RunTest()
[all …]
DepthwiseConvolution2D.cpp
22 std::string inputTensors = "[ 0, 2 ]"; in DepthwiseConvolution2dFixture() local
27 inputTensors = "[ 0, 2, 3 ]"; in DepthwiseConvolution2dFixture()
92 "inputs": )" + inputTensors + R"(, in DepthwiseConvolution2dFixture()
238 std::string inputTensors = "[ 0, 2 ]"; in DepthwiseConvolution2dFixture2() local
243 inputTensors = "[ 0, 2, 3 ]"; in DepthwiseConvolution2dFixture2()
318 "inputs": )" + inputTensors + R"(, in DepthwiseConvolution2dFixture2()
/external/android-nn-driver/
ArmnnPreparedModel.cpp
316 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
324 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
333 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteGraph()
342 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteGraph()
349 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteGraph()
401 armnn::InputTensors inputTensors; in ExecuteWithDummyInputs() local
412 inputTensors.emplace_back(i, inputTensor); in ExecuteWithDummyInputs()
431 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteWithDummyInputs()
440 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteWithDummyInputs()
447 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteWithDummyInputs()
[all …]
ArmnnPreparedModel.hpp
55 armnn::InputTensors& inputTensors,
71 std::shared_ptr<armnn::InputTensors>& inputTensors, in ArmnnThreadPoolCallback() argument
76 m_InputTensors(inputTensors), in ArmnnThreadPoolCallback()
97 std::shared_ptr<armnn::InputTensors>& inputTensors,
ArmnnPreparedModel_1_2.cpp
526 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
536 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
560 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteGraph()
570 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteGraph()
577 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteGraph()
635 armnn::InputTensors inputTensors; in ExecuteWithDummyInputs() local
646 inputTensors.emplace_back(i, inputTensor); in ExecuteWithDummyInputs()
665 inputTensors, in ExecuteWithDummyInputs()
704 auto inputTensors = std::make_shared<armnn::InputTensors>(); in Execute() local
707 …auto prepareStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request, callbac… in Execute()
[all …]
RequestThread.hpp
42 std::shared_ptr<armnn::InputTensors>& inputTensors,
55 std::shared_ptr<armnn::InputTensors>& inputTensors, in AsyncExecuteData()
60 , m_InputTensors(inputTensors) in AsyncExecuteData()
RequestThread_1_3.hpp
41 std::shared_ptr<armnn::InputTensors>& inputTensors,
54 std::shared_ptr<armnn::InputTensors>& inputTensors, in AsyncExecuteData()
59 , m_InputTensors(inputTensors) in AsyncExecuteData()
/external/armnn/shim/sl/canonical/
ArmnnPreparedModel.cpp
331 auto inputTensors = std::make_shared<armnn::InputTensors>(); in execute() local
335 ErrorStatus theErrorStatus = PrepareMemoryForIO(*inputTensors, in execute()
365 …auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx, isPointerTypeMemory); in execute()
377 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
384 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
395 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc); in ExecuteGraph()
402 inputTensors.erase( in ExecuteGraph()
404 inputTensors.begin(), inputTensors.end(), in ExecuteGraph()
408 inputTensors.end()); in ExecuteGraph()
428 inputTensors, in ExecuteGraph()
[all …]
/external/tensorflow/tensorflow/lite/java/src/main/java/org/tensorflow/lite/
NativeInterpreterWrapper.java
118 this.inputTensors = new TensorImpl[getInputCount(interpreterHandle)]; in init()
134 for (int i = 0; i < inputTensors.length; ++i) { in close()
135 if (inputTensors[i] != null) { in close()
136 inputTensors[i].close(); in close()
137 inputTensors[i] = null; in close()
280 if (inputTensors[idx] != null) { in resizeInput()
281 inputTensors[idx].refreshShape(); in resizeInput()
363 return inputTensors.length; in getInputTensorCount()
372 if (index < 0 || index >= inputTensors.length) { in getInputTensor()
375 TensorImpl inputTensor = inputTensors[index]; in getInputTensor()
[all …]
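
Note: the Java wrapper above keeps a `TensorImpl[] inputTensors` cache and refreshes entries after `resizeInput()`. For comparison, a hedged C++ counterpart of that resize-then-reallocate flow using the TFLite C++ API; the shape is a placeholder assumption.

    // Hedged sketch of the flow the resizeInput()/refreshShape() hits correspond to,
    // written against the TFLite C++ Interpreter API rather than the Java wrapper.
    #include <vector>
    #include "tensorflow/lite/interpreter.h"

    void ResizeFirstInput(tflite::Interpreter& interpreter) {
      const std::vector<int>& inputs = interpreter.inputs();  // one tensor index per model input
      if (!inputs.empty()) {
        // Placeholder NHWC shape; a real caller would pass the shape it intends to feed.
        interpreter.ResizeInputTensor(inputs[0], {1, 224, 224, 3});
        interpreter.AllocateTensors();  // re-plan buffers so the new shape takes effect
      }
    }
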
/external/pytorch/test/cpp_extensions/
cpp_c10d_extension.hpp
63 std::vector<at::Tensor>& inputTensors,
76 std::vector<at::Tensor>& inputTensors,
81 std::vector<std::vector<at::Tensor>>& inputTensors,
86 std::vector<std::vector<at::Tensor>>& inputTensors,
cpp_c10d_extension.cpp
52 std::vector<at::Tensor>& inputTensors, in allgather() argument
71 std::vector<at::Tensor>& inputTensors, in gather() argument
78 std::vector<std::vector<at::Tensor>>& inputTensors, in scatter() argument
85 std::vector<std::vector<at::Tensor>>& inputTensors, in reduce_scatter() argument
/external/armnn/src/armnn/
Runtime.cpp
92 …ctor<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors, in ImportInputs() argument
95 return pRuntimeImpl->ImportInputs(networkId, inputTensors, forceImportMemorySource); in ImportInputs()
114 const InputTensors& inputTensors, in EnqueueWorkload() argument
119 return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors, in EnqueueWorkload()
124 const InputTensors& inputTensors, in Execute() argument
130 inputTensors, in Execute()
623 …r<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors, in ImportInputs() argument
626 return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors, forceImportMemorySource); in ImportInputs()
645 const InputTensors& inputTensors, in EnqueueWorkload() argument
678 auto status = loadedNetwork->EnqueueWorkload(inputTensors, outputTensors, in EnqueueWorkload()
[all …]
Threadpool.cpp
70 const InputTensors& inputTensors, in Schedule() argument
81 ExecutionTuple groupExecParams = std::make_tuple(networkId, inputTensors, outputTensors, cb); in Schedule()
181 auto inputTensors = std::get<1>(*currentExecInProgress); in ProcessExecPriorities() local
193 m_RuntimePtr->Execute(memHandle, inputTensors, outputTensors) == Status::Success ? in ProcessExecPriorities()
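
Note: the Runtime.cpp and Threadpool.cpp hits above are the asynchronous path, where `inputTensors` is passed to `Execute` together with a working memory handle (or pre-imported via `ImportInputs`). A hedged sketch of the `Execute` variant follows; the header and namespace names reflect the public Arm NN API as I understand it and should be treated as assumptions.

    // Hedged sketch of the IRuntime::Execute path referenced by the Threadpool hits:
    // one working memory handle per in-flight execution, plus the usual tensor pair lists.
    #include <armnn/ArmNN.hpp>
    #include <armnn/IWorkingMemHandle.hpp>
    #include <memory>

    armnn::Status ExecuteAsyncStyle(armnn::IRuntime& runtime,
                                    armnn::NetworkId netId,
                                    const armnn::InputTensors& inputTensors,
                                    const armnn::OutputTensors& outputTensors)
    {
        // The threaded end-to-end test above keeps a vector of these, one per worker.
        std::unique_ptr<armnn::experimental::IWorkingMemHandle> handle =
            runtime.CreateWorkingMemHandle(netId);
        return runtime.Execute(*handle, inputTensors, outputTensors);
    }
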
/external/armnn/delegate/opaque/src/
Redefine.hpp
29 const int* inputTensors; in VisitCastOperator() local
30 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk) in VisitCastOperator()
37 … tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[numInputs-1]); in VisitCastOperator()
/external/armnn/src/armnnUtils/
TensorIOUtils.hpp
20 armnn::InputTensors inputTensors; in MakeInputTensors() local
47 inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor)); in MakeInputTensors()
52 return inputTensors; in MakeInputTensors()
