Searched refs:inputTensors (Results 1 – 25 of 63) sorted by relevance

/external/armnn/src/backends/backendsCommon/test/
StridedSliceAsyncEndToEndTest.hpp
57 InputTensors inputTensors; in AsyncThreadedEndToEndTestImpl() local
61 inputTensors.reserve(inputTensorData.size()); in AsyncThreadedEndToEndTestImpl()
66 inputTensors.push_back({it.first, in AsyncThreadedEndToEndTestImpl()
80 inputTensorsVec.push_back(inputTensors); in AsyncThreadedEndToEndTestImpl()
90 InputTensors& inputTensors = inputTensorsVec[i]; in AsyncThreadedEndToEndTestImpl() local
97 runtime->Execute(workingMemHandle, inputTensors, outputTensors); in AsyncThreadedEndToEndTestImpl()
149 InputTensors inputTensors; in AsyncEndToEndTestImpl() local
150 inputTensors.reserve(inputTensorData.size()); in AsyncEndToEndTestImpl()
155 inputTensors.push_back({it.first, in AsyncEndToEndTestImpl()
189 runtime->Execute(workingMemHandleRef, inputTensors, outputTensorsVec[0]); in AsyncEndToEndTestImpl()
[all …]
EndToEndTestImpl.hpp
68 InputTensors inputTensors in ConstantUsageTest() local
78 runtime->EnqueueWorkload(netId, inputTensors, outputTensors); in ConstantUsageTest()
152 InputTensors inputTensors; in EndToEndLayerTestImpl() local
153 inputTensors.reserve(inputTensorData.size()); in EndToEndLayerTestImpl()
156 inputTensors.push_back({it.first, in EndToEndLayerTestImpl()
172 runtime->EnqueueWorkload(netId, inputTensors, outputTensors); in EndToEndLayerTestImpl()
240 InputTensors inputTensors in ImportNonAlignedInputPointerTest() local
252 …CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportExceptio… in ImportNonAlignedInputPointerTest()
309 InputTensors inputTensors in ExportNonAlignedOutputPointerTest() local
322 …CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportExceptio… in ExportNonAlignedOutputPointerTest()
[all …]
QLstmEndToEndTestImpl.cpp
245 InputTensors inputTensors; in QLstmEndToEnd() local
246 inputTensors.reserve(3); in QLstmEndToEnd()
248 inputTensors.push_back({0, ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputVector.data())}… in QLstmEndToEnd()
249 inputTensors.push_back({1, ConstTensor(runtime->GetInputTensorInfo(netId, 1), outputStateInVector.… in QLstmEndToEnd()
250 inputTensors.push_back({2, ConstTensor(runtime->GetInputTensorInfo(netId, 2), cellStateInVector.da… in QLstmEndToEnd()
265 runtime->EnqueueWorkload(netId, inputTensors, outputTensors); in QLstmEndToEnd()
QuantizedLstmEndToEndTestImpl.cpp
211 InputTensors inputTensors; in QuantizedLstmEndToEnd() local
212 inputTensors.reserve(3); in QuantizedLstmEndToEnd()
222 inputTensors.push_back({0, ConstTensor(inputTensorInfo0, inputVector.data())}); in QuantizedLstmEndToEnd()
223 inputTensors.push_back({1, ConstTensor(inputTensorInfo1, cellStateInVector.data())}); in QuantizedLstmEndToEnd()
224 inputTensors.push_back({2, ConstTensor(inputTensorInfo2, outputStateInVector.data())}); in QuantizedLstmEndToEnd()
236 runtime->EnqueueWorkload(netId, inputTensors, outputTensors); in QuantizedLstmEndToEnd()
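
Note: the test matches above all follow the same synchronous pattern, namely building an armnn::InputTensors vector (pairs of layer binding id and ConstTensor wrapping caller-owned memory), mirroring it with OutputTensors, and calling EnqueueWorkload on the loaded network. A minimal sketch of that pattern, assuming a network already loaded as netId with one float input and one float output at binding id 0; RunOnce is a hypothetical helper, not code from any file above:

    #include <armnn/ArmNN.hpp>
    #include <vector>

    // Hypothetical helper illustrating the synchronous end-to-end test pattern.
    armnn::Status RunOnce(armnn::IRuntime& runtime, armnn::NetworkId netId,
                          std::vector<float>& inputData, std::vector<float>& outputData)
    {
        armnn::TensorInfo inputInfo = runtime.GetInputTensorInfo(netId, 0);
        inputInfo.SetConstant(true); // assumption: newer ArmNN releases expect input infos marked constant

        // InputTensors is a vector of {LayerBindingId, ConstTensor} pairs over caller-owned memory.
        armnn::InputTensors inputTensors
        {
            { 0, armnn::ConstTensor(inputInfo, inputData.data()) }
        };
        armnn::OutputTensors outputTensors
        {
            { 0, armnn::Tensor(runtime.GetOutputTensorInfo(netId, 0), outputData.data()) }
        };

        return runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
    }
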
/external/android-nn-driver/
ArmnnPreparedModel.cpp
316 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
324 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
333 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteGraph()
342 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteGraph()
349 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteGraph()
401 armnn::InputTensors inputTensors; in ExecuteWithDummyInputs() local
412 inputTensors.emplace_back(i, inputTensor); in ExecuteWithDummyInputs()
431 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteWithDummyInputs()
440 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteWithDummyInputs()
447 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteWithDummyInputs()
[all …]
ArmnnPreparedModel_1_2.cpp
526 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
536 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
560 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteGraph()
570 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteGraph()
577 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteGraph()
635 armnn::InputTensors inputTensors; in ExecuteWithDummyInputs() local
646 inputTensors.emplace_back(i, inputTensor); in ExecuteWithDummyInputs()
665 inputTensors, in ExecuteWithDummyInputs()
704 auto inputTensors = std::make_shared<armnn::InputTensors>(); in Execute() local
707 …auto prepareStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request, callbac… in Execute()
[all …]
ArmnnPreparedModel.hpp
55 armnn::InputTensors& inputTensors,
71 std::shared_ptr<armnn::InputTensors>& inputTensors, in ArmnnThreadPoolCallback() argument
76 m_InputTensors(inputTensors), in ArmnnThreadPoolCallback()
97 std::shared_ptr<armnn::InputTensors>& inputTensors,
RequestThread.hpp
42 std::shared_ptr<armnn::InputTensors>& inputTensors,
55 std::shared_ptr<armnn::InputTensors>& inputTensors, in AsyncExecuteData()
60 , m_InputTensors(inputTensors) in AsyncExecuteData()
RequestThread_1_3.hpp
41 std::shared_ptr<armnn::InputTensors>& inputTensors,
54 std::shared_ptr<armnn::InputTensors>& inputTensors, in AsyncExecuteData()
59 , m_InputTensors(inputTensors) in AsyncExecuteData()
ArmnnPreparedModel_1_3.cpp
828 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
836 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
859 status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors); in ExecuteGraph()
868 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc… in ExecuteGraph()
875 status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors, in ExecuteGraph()
933 std::shared_ptr<armnn::InputTensors>& inputTensors, in ScheduleGraphForExecution() argument
940 DumpTensorsIfRequired("Input", *inputTensors); in ScheduleGraphForExecution()
957 inputTensors, in ScheduleGraphForExecution()
973 armnn::InputTensors inputTensors; in ExecuteWithDummyInputs() local
984 inputTensors.emplace_back(i, inputTensor); in ExecuteWithDummyInputs()
[all …]
ArmnnPreparedModel_1_2.hpp
85 armnn::InputTensors& inputTensors,
102 std::shared_ptr<armnn::InputTensors>& inputTensors, in ArmnnThreadPoolCallback_1_2() argument
108 m_InputTensors(inputTensors), in ArmnnThreadPoolCallback_1_2()
152 std::shared_ptr<armnn::InputTensors>& inputTensors,
/external/armnn/shim/sl/canonical/
ArmnnPreparedModel.cpp
331 auto inputTensors = std::make_shared<armnn::InputTensors>(); in execute() local
335 ErrorStatus theErrorStatus = PrepareMemoryForIO(*inputTensors, in execute()
365 …auto errorStatus = ExecuteGraph(memPools, *inputTensors, *outputTensors, ctx, isPointerTypeMemory); in execute()
377 armnn::InputTensors& inputTensors, in ExecuteGraph() argument
384 DumpTensorsIfRequired("Input", inputTensors); in ExecuteGraph()
395 …importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc); in ExecuteGraph()
402 inputTensors.erase( in ExecuteGraph()
404 inputTensors.begin(), inputTensors.end(), in ExecuteGraph()
408 inputTensors.end()); in ExecuteGraph()
428 inputTensors, in ExecuteGraph()
[all …]
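
Note: the android-nn-driver and shim matches above show the zero-copy path: ImportInputs() is attempted first with MemorySource::Malloc, entries that were successfully imported are erased from inputTensors, and the imported ids are passed along with whatever still needs to be copied. A simplified sketch of that flow, assuming imported ids can be matched against layer binding ids as the shim code above appears to do; RunWithImport is a hypothetical helper:

    #include <armnn/ArmNN.hpp>
    #include <algorithm>
    #include <vector>

    // Hypothetical helper: import what we can, copy the rest, then enqueue.
    void RunWithImport(armnn::IRuntime& runtime, armnn::NetworkId netId,
                       armnn::InputTensors inputTensors, const armnn::OutputTensors& outputTensors)
    {
        std::vector<armnn::ImportedInputId> importedInputIds =
            runtime.ImportInputs(netId, inputTensors, armnn::MemorySource::Malloc);

        // Entries that were imported are removed; EnqueueWorkload receives them by id instead.
        for (armnn::ImportedInputId importedId : importedInputIds)
        {
            inputTensors.erase(
                std::remove_if(inputTensors.begin(), inputTensors.end(),
                               [importedId](const std::pair<armnn::LayerBindingId, armnn::ConstTensor>& element)
                               {
                                   return element.first == static_cast<armnn::LayerBindingId>(importedId);
                               }),
                inputTensors.end());
        }

        runtime.EnqueueWorkload(netId, inputTensors, outputTensors, importedInputIds, {});
    }
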
/external/armnn/src/armnnTfLiteParser/test/
FullyConnected.cpp
23 std::string inputTensors = "[ 0, 2 ]"; in FullyConnectedFixture() local
28 inputTensors = "[ 0, 2, 3 ]"; in FullyConnectedFixture()
93 "inputs": )" + inputTensors + R"(, in FullyConnectedFixture()
204 std::string inputTensors = "[ 0, 1 ]"; in FullyConnectedNonConstWeightsFixture() local
210 inputTensors = "[ 0, 1, 2 ]"; in FullyConnectedNonConstWeightsFixture()
283 "inputs": )" + inputTensors + R"(, in FullyConnectedNonConstWeightsFixture()
288 "inputs": )" + inputTensors + R"(, in FullyConnectedNonConstWeightsFixture()
ParserFlatbuffersFixture.hpp
285 void FillInputTensors(armnn::InputTensors& inputTensors,
293 armnn::InputTensors& inputTensors, in FillInputTensors() argument
302 inputTensors.push_back({ bindingInfo.first, armnn::ConstTensor(bindingInfo.second, it.second.data(… in FillInputTensors()
347 armnn::InputTensors inputTensors; in RunTest() local
348 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId); in RunTest()
372 m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors); in RunTest()
390 m_Runtime->EnqueueWorkload(m_DynamicNetworkIdentifier, inputTensors, outputTensors); in RunTest()
418 armnn::InputTensors inputTensors; in RunTest() local
419 FillInputTensors<armnnType1>(inputTensors, inputData, subgraphId); in RunTest()
436 m_Runtime->EnqueueWorkload(m_NetworkIdentifier, inputTensors, outputTensors); in RunTest()
[all …]
DepthwiseConvolution2D.cpp
22 std::string inputTensors = "[ 0, 2 ]"; in DepthwiseConvolution2dFixture() local
27 inputTensors = "[ 0, 2, 3 ]"; in DepthwiseConvolution2dFixture()
92 "inputs": )" + inputTensors + R"(, in DepthwiseConvolution2dFixture()
238 std::string inputTensors = "[ 0, 2 ]"; in DepthwiseConvolution2dFixture2() local
243 inputTensors = "[ 0, 2, 3 ]"; in DepthwiseConvolution2dFixture2()
318 "inputs": )" + inputTensors + R"(, in DepthwiseConvolution2dFixture2()
/external/tensorflow/tensorflow/lite/java/src/main/java/org/tensorflow/lite/
NativeInterpreterWrapper.java
118 this.inputTensors = new TensorImpl[getInputCount(interpreterHandle)]; in init()
134 for (int i = 0; i < inputTensors.length; ++i) { in close()
135 if (inputTensors[i] != null) { in close()
136 inputTensors[i].close(); in close()
137 inputTensors[i] = null; in close()
280 if (inputTensors[idx] != null) { in resizeInput()
281 inputTensors[idx].refreshShape(); in resizeInput()
363 return inputTensors.length; in getInputTensorCount()
372 if (index < 0 || index >= inputTensors.length) { in getInputTensor()
375 TensorImpl inputTensor = inputTensors[index]; in getInputTensor()
[all …]
/external/armnn/src/armnn/
Runtime.cpp
92 …ctor<ImportedInputId> IRuntime::ImportInputs(NetworkId networkId, const InputTensors& inputTensors, in ImportInputs() argument
95 return pRuntimeImpl->ImportInputs(networkId, inputTensors, forceImportMemorySource); in ImportInputs()
114 const InputTensors& inputTensors, in EnqueueWorkload() argument
119 return pRuntimeImpl->EnqueueWorkload(networkId, inputTensors, outputTensors, in EnqueueWorkload()
124 const InputTensors& inputTensors, in Execute() argument
130 inputTensors, in Execute()
623 …r<ImportedInputId> RuntimeImpl::ImportInputs(NetworkId networkId, const InputTensors& inputTensors, in ImportInputs() argument
626 return GetLoadedNetworkPtr(networkId)->ImportInputs(inputTensors, forceImportMemorySource); in ImportInputs()
645 const InputTensors& inputTensors, in EnqueueWorkload() argument
678 auto status = loadedNetwork->EnqueueWorkload(inputTensors, outputTensors, in EnqueueWorkload()
[all …]
Threadpool.cpp
70 const InputTensors& inputTensors, in Schedule() argument
81 ExecutionTuple groupExecParams = std::make_tuple(networkId, inputTensors, outputTensors, cb); in Schedule()
181 auto inputTensors = std::get<1>(*currentExecInProgress); in ProcessExecPriorities() local
193 m_RuntimePtr->Execute(memHandle, inputTensors, outputTensors) == Status::Success ? in ProcessExecPriorities()
Runtime.hpp
58 std::vector<ImportedInputId> ImportInputs(NetworkId networkId, const InputTensors& inputTensors,
68 const InputTensors& inputTensors,
78 const InputTensors& inputTensors,
/external/armnn/delegate/opaque/src/
Redefine.hpp
29 const int* inputTensors; in VisitCastOperator() local
30 if (TfLiteOpaqueNodeInputs(tfLiteNode, &inputTensors, &numInputs) != kTfLiteOk) in VisitCastOperator()
37 … tfLiteInputTensor = TfLiteOpaqueContextGetOpaqueTensor(tfLiteContext, inputTensors[numInputs-1]); in VisitCastOperator()
/external/armnn/src/armnnUtils/
TensorIOUtils.hpp
20 armnn::InputTensors inputTensors; in MakeInputTensors() local
47 inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor)); in MakeInputTensors()
52 return inputTensors; in MakeInputTensors()
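
Note: TensorIOUtils.hpp provides MakeInputTensors(), which pairs each input binding with a caller-supplied data container. A rough sketch of its shape, assuming one std::vector<float> per binding and omitting the size validation the real templated utility performs; MakeInputTensorsSketch is a hypothetical stand-in, not the actual function:

    #include <armnn/ArmNN.hpp>
    #include <cstddef>
    #include <utility>
    #include <vector>

    // Hypothetical, simplified stand-in for the MakeInputTensors() helper matched above.
    armnn::InputTensors MakeInputTensorsSketch(
        const std::vector<armnn::BindingPointInfo>& inputBindings,    // {LayerBindingId, TensorInfo} pairs
        const std::vector<std::vector<float>>& inputDataContainers)   // one container per binding
    {
        armnn::InputTensors inputTensors;
        inputTensors.reserve(inputBindings.size());

        for (std::size_t i = 0; i < inputBindings.size(); ++i)
        {
            const armnn::BindingPointInfo& inputBinding = inputBindings[i];
            // Note: on newer ArmNN the TensorInfo may need SetConstant(true) before wrapping it in a ConstTensor.
            armnn::ConstTensor inputTensor(inputBinding.second, inputDataContainers[i].data());
            inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));
        }
        return inputTensors;
    }
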
/external/tensorflow/tensorflow/java/src/main/java/org/tensorflow/
Session.java
146 inputTensors.add(t); in feed()
157 inputTensors.add(t); in feed()
292 long[] inputTensorHandles = new long[inputTensors.size()]; in runHelper()
303 for (Tensor<?> t : inputTensors) { in runHelper()
405 private ArrayList<Tensor<?>> inputTensors = new ArrayList<Tensor<?>>(); field in Session.Runner
/external/armnn/src/armnn/test/
RuntimeTests.cpp
241 InputTensors inputTensors = {{0, inputTensor1}, {1, inputTensor2}}; variable
261 runtime->Execute(*memHandle.get(),inputTensors, {output1, output2});
269 runtime->Execute(*memHandle.get(), inputTensors, {}, {}, importedOutputVec);
272 runtime->Execute(*memHandle.get(), inputTensors, {output1}, {}, {1});
275 runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {0});
278 auto importedInputVec = runtime->ImportInputs(networkId, inputTensors, MemorySource::Malloc);
290 … CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output1, output2}, {}, {0, 1});,
294 CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output2}, {}, {1});,
298 CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {output1, output1}, {}, {});,
302 CHECK_THROWS_AS(runtime->Execute(*memHandle.get(), inputTensors, {}, {}, {0, 0}),
[all …]
/external/armnn/samples/
AsyncExecutionSample.cpp
109 std::vector<InputTensors> inputTensors in main() local
124 run->Execute(*memHandle, inputTensors[executionIndex], outputTensors[executionIndex]); in main()
129 for (unsigned int i = 0; i < inputTensors.size(); ++i) in main()
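
Note: AsyncExecutionSample.cpp drives the asynchronous API: each in-flight execution gets its own working memory handle and calls IRuntime::Execute() with its own InputTensors and OutputTensors. A minimal sketch, assuming the network was loaded with asynchronous execution enabled via INetworkProperties; RunConcurrently is a hypothetical helper, not the sample itself:

    #include <armnn/ArmNN.hpp>
    #include <cstddef>
    #include <thread>
    #include <vector>

    // Hypothetical helper: run one asynchronous execution per InputTensors entry, each on its own thread.
    void RunConcurrently(armnn::IRuntime& runtime, armnn::NetworkId netId,
                         const std::vector<armnn::InputTensors>& inputTensors,
                         const std::vector<armnn::OutputTensors>& outputTensors)
    {
        std::vector<std::thread> threads;
        for (std::size_t i = 0; i < inputTensors.size(); ++i)
        {
            threads.emplace_back([&runtime, &inputTensors, &outputTensors, netId, i]()
            {
                // Each in-flight execution needs its own working memory handle.
                auto memHandle = runtime.CreateWorkingMemHandle(netId);
                runtime.Execute(*memHandle, inputTensors[i], outputTensors[i]);
            });
        }
        for (std::thread& t : threads)
        {
            t.join();
        }
    }
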
/external/armnn/src/profiling/test/
FileOnlyProfilingDecoratorTests.cpp
98 InputTensors inputTensors variable
108 runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
223 InputTensors inputTensors variable
233 runtime.EnqueueWorkload(netId, inputTensors, outputTensors);
