/external/tensorflow/tensorflow/lite/objc/tests/

D | TFLInterpreterTests.m
      132  TFLTensor *inputTensor = [customInterpreter inputTensorAtIndex:0 error:&error];
      133  XCTAssertNotNil(inputTensor);
      135  XCTAssertTrue([inputTensor.name isEqualToString:@"input"]);
      136  XCTAssertEqual(inputTensor.dataType, TFLTensorDataTypeFloat32);
      137  NSArray<NSNumber *> *inputTensorShape = [inputTensor shapeWithError:&error];
      147  XCTAssertTrue([inputTensor copyData:inputData error:&error]);
      220  TFLTensor *inputTensor = [customInterpreter inputTensorAtIndex:0 error:&error];
      221  XCTAssertNotNil(inputTensor);
      223  XCTAssertTrue([inputTensor.name isEqualToString:@"input"]);
      224  XCTAssertEqual(inputTensor.dataType, TFLTensorDataTypeUInt8);
      [all …]

D | TFLCoreMLDelegateTests.m
      86  TFLTensor* inputTensor = [interpreter inputTensorAtIndex:0 error:&error];
      87  XCTAssertNotNil(inputTensor);
      88  XCTAssertTrue([inputTensor copyData:inputData error:&error]);

D | TFLMetalDelegateTests.m
      94  TFLTensor* inputTensor = [interpreter inputTensorAtIndex:input_idx error:&error];
      95  XCTAssertNotNil(inputTensor);
      96  XCTAssertTrue([inputTensor copyData:inputData error:&error]);

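The Objective-C matches above share one flow: fetch an input tensor with inputTensorAtIndex:error:, copy bytes into it with copyData:error:, invoke, and read the output. As a hedged sketch only (not code from these tests), the same flow expressed with the TensorFlow Lite C++ API looks roughly like this; the model path "add.bin", the function name RunAddModel, and the assumption that the output has the same element count as the input are illustrative:

#include <cstring>
#include <memory>
#include <vector>

#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Sketch: load a model, fill input tensor 0, run, read output tensor 0.
bool RunAddModel(const std::vector<float>& input, std::vector<float>* output) {
  std::unique_ptr<tflite::FlatBufferModel> model =
      tflite::FlatBufferModel::BuildFromFile("add.bin");  // illustrative path
  if (model == nullptr) return false;

  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk ||
      interpreter->AllocateTensors() != kTfLiteOk) {
    return false;
  }

  // Rough equivalent of -inputTensorAtIndex:error: followed by -copyData:error:.
  float* input_data = interpreter->typed_input_tensor<float>(0);
  std::memcpy(input_data, input.data(), input.size() * sizeof(float));

  if (interpreter->Invoke() != kTfLiteOk) return false;

  // Assumes the output has as many elements as the input (true for a simple add model).
  const float* output_data = interpreter->typed_output_tensor<float>(0);
  output->assign(output_data, output_data + input.size());
  return true;
}
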
/external/tensorflow/tensorflow/lite/swift/Tests/

D | TensorTests.swift
      27  let inputTensor = Tensor(   [in testInit(), variable]
      34  XCTAssertEqual(inputTensor.name, name)   [in testInit()]
      35  XCTAssertEqual(inputTensor.dataType, dataType)   [in testInit()]
      36  XCTAssertEqual(inputTensor.shape, shape)   [in testInit()]
      37  XCTAssertEqual(inputTensor.data, data)   [in testInit()]
      38  XCTAssertEqual(inputTensor.quantizationParameters, quantizationParameters)   [in testInit()]

D | InterpreterTests.swift
      74  let inputTensor = try interpreter.input(at: AddModel.validIndex)   [in testInputTensorAtIndex(), variable]
      75  XCTAssertEqual(inputTensor, AddModel.inputTensor)   [in testInputTensorAtIndex()]
      81  let inputTensor = try interpreter.input(at: AddQuantizedModel.inputOutputIndex)   [in testInputTensorAtIndex_QuantizedModel(), variable]
      82  XCTAssertEqual(inputTensor, AddQuantizedModel.inputTensor)   [in testInputTensorAtIndex_QuantizedModel()]
      162  let inputTensor = try interpreter.copy(AddModel.inputData, toInputAt: AddModel.validIndex)   [in testCopyDataToInputTensorAtIndex(), variable]
      163  XCTAssertEqual(inputTensor.data, AddModel.inputData)   [in testCopyDataToInputTensorAtIndex()]
      286  static let inputTensor = Tensor(   [variable]
      316  static let inputTensor = Tensor(   [variable]

/external/tensorflow/tensorflow/lite/objc/apps/TestApp/TestApp/

D | ViewController.m
      198  TFLTensor *inputTensor = [weakSelf.interpreter inputTensorAtIndex:0 error:&error];
      199  if (inputTensor == nil || error != nil) {
      210  if (![inputTensor copyData:inputData error:&error]) {
      268  TFLTensor *inputTensor = [weakSelf.interpreter inputTensorAtIndex:0 error:&error];
      269  if (inputTensor == nil || error != nil) {
      280  if (![inputTensor copyData:inputData error:&error]) {
      355  TFLTensor *inputTensor = [weakSelf.interpreter inputTensorAtIndex:i error:&error];
      356  if (inputTensor == nil || error != nil) {
      370  if (![inputTensor copyData:inputData error:&error]) {

/external/android-nn-driver/

D | ArmnnPreparedModel.cpp
      182  …const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPool…   [in execute(), local]
      191  uint8_t* inputTensorBegin = static_cast<uint8_t*>(inputTensor.GetMemoryArea());   [in execute()]
      209  pInputTensors->emplace_back(i, inputTensor);   [in execute()]
      339  const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());   [in ExecuteWithDummyInputs(), local]
      341  inputTensors.emplace_back(i, inputTensor);   [in ExecuteWithDummyInputs()]

D | ArmnnPreparedModel_1_2.cpp
      226  …const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);   [in PrepareMemoryForInputs(), local]
      235  uint8_t* inputTensorBegin = static_cast<uint8_t*>(inputTensor.GetMemoryArea());   [in PrepareMemoryForInputs()]
      253  inputs.emplace_back(i, inputTensor);   [in PrepareMemoryForInputs()]
      518  const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());   [in ExecuteWithDummyInputs(), local]
      520  inputTensors.emplace_back(i, inputTensor);   [in ExecuteWithDummyInputs()]

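The driver code above, like TensorIOUtils.hpp and ArmNNQuantizerMain.cpp further down, builds armnn::InputTensors by wrapping caller-owned buffers in armnn::ConstTensor (or armnn::Tensor for outputs) and pairing each with a layer binding id via emplace_back. The following is a self-contained sketch of that pattern against the public Arm NN API of the era these sources target, not code from any of the files listed; the trivial ReLU network, the CpuRef backend choice, binding id 0, and the buffer contents are all invented for illustration (newer Arm NN releases additionally require marking the input TensorInfo constant before wrapping it in a ConstTensor):

#include <utility>
#include <vector>

#include <armnn/ArmNN.hpp>

int main() {
  using namespace armnn;

  // Build a trivial network: input -> ReLU activation -> output.
  INetworkPtr network = INetwork::Create();
  IConnectableLayer* input = network->AddInputLayer(0);
  ActivationDescriptor reluDesc;
  reluDesc.m_Function = ActivationFunction::ReLu;
  IConnectableLayer* relu = network->AddActivationLayer(reluDesc, "relu");
  IConnectableLayer* output = network->AddOutputLayer(0);

  TensorInfo info(TensorShape({1, 4}), DataType::Float32);
  input->GetOutputSlot(0).Connect(relu->GetInputSlot(0));
  relu->GetOutputSlot(0).Connect(output->GetInputSlot(0));
  input->GetOutputSlot(0).SetTensorInfo(info);
  relu->GetOutputSlot(0).SetTensorInfo(info);

  // Optimize for the CPU reference backend and load into a runtime.
  IRuntimePtr runtime = IRuntime::Create(IRuntime::CreationOptions());
  IOptimizedNetworkPtr optNet =
      Optimize(*network, {Compute::CpuRef}, runtime->GetDeviceSpec());
  NetworkId networkId;
  runtime->LoadNetwork(networkId, std::move(optNet));

  // The pattern from the listings: ConstTensor over the input buffer and
  // Tensor over the output buffer, each keyed by its layer binding id.
  std::vector<float> inputData{-1.0f, 0.0f, 2.0f, 3.0f};
  std::vector<float> outputData(4);

  InputTensors inputTensors;
  inputTensors.emplace_back(
      0, ConstTensor(runtime->GetInputTensorInfo(networkId, 0), inputData.data()));
  OutputTensors outputTensors;
  outputTensors.emplace_back(
      0, Tensor(runtime->GetOutputTensorInfo(networkId, 0), outputData.data()));

  runtime->EnqueueWorkload(networkId, inputTensors, outputTensors);
  return 0;
}
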
/external/armnn/src/armnn/test/optimizations/

D | Fp32NetworkToBf16ConverterTests.cpp
      95  armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();   [in BOOST_AUTO_TEST_CASE(), local]
      100  BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));   [in BOOST_AUTO_TEST_CASE()]
      165  armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();   [in BOOST_AUTO_TEST_CASE(), local]
      170  BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));   [in BOOST_AUTO_TEST_CASE()]

/external/armnn/src/backends/backendsCommon/test/layerTests/

D | PadTestImpl.cpp
      57  auto inputTensor = MakeTensor<T, 2>(inputTensorInfo, std::vector<T>(inputValues));   [in Pad2dTestCommon(), local]
      84  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);   [in Pad2dTestCommon()]
      143  auto inputTensor = MakeTensor<T, 3>(inputTensorInfo, std::vector<T>(inputValues));   [in Pad3dTestCommon(), local]
      170  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0]);   [in Pad3dTestCommon()]
      383  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo, std::vector<T>(inputValues));   [in Pad4dTestCommon(), local]
      410  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);   [in Pad4dTestCommon()]

D | InstanceNormalizationTestImpl.cpp
      39  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,   [in InstanceNormTestImpl(), local]
      60  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);   [in InstanceNormTestImpl()]

D | L2NormalizationTestImpl.cpp
      51  auto inputTensor = MakeTensor<T, 4>(inputTensorInfo,   [in L2NormalizationTestImpl(), local]
      88  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0][0][0]);   [in L2NormalizationTestImpl()]
      728  auto inputTensor = MakeTensor<float, 2>(inputTensorInfo, inputData);   [in L2Normalization2dShapeTest(), local]
      749  CopyDataToITensorHandle(inputHandle.get(), &inputTensor[0][0]);   [in L2Normalization2dShapeTest()]

/external/armnn/src/armnnUtils/

D | TensorIOUtils.hpp
      45  armnn::ConstTensor inputTensor(inputBinding.second, value.data());   [in MakeInputTensors(), local]
      46  inputTensors.push_back(std::make_pair(inputBinding.first, inputTensor));   [in MakeInputTensors()]

/external/armnn/src/armnnQuantizer/

D | ArmNNQuantizerMain.cpp
      77  armnn::ConstTensor inputTensor(tensorInfo, inputData[count].data());   [in main(), local]
      78  inputTensors.push_back(std::make_pair(layerBindingId, inputTensor));   [in main()]

/external/armnn/delegate/src/test/

D | RedefineTestHelper.hpp
      47  auto inputTensor = CreateTensor(flatBufferBuilder,   [in CreateRedefineTfLiteModel(), local]
      70  tensors = { inputTensor, outputTensor};   [in CreateRedefineTfLiteModel()]
      89  tensors = { inputTensor, outputTensor, shapeTensor };   [in CreateRedefineTfLiteModel()]

/external/armnn/src/armnnTfLiteParser/test/

D | Conv2D.cpp
      27  "name": "inputTensor",   [in SimpleConv2DFixture()]
      132  "name": "inputTensor",   [in Conv2DWithBiasesFixture()]

D | LeakyRelu.cpp
      29  "name": "inputTensor",   [in LeakyReluFixture()]

D | Softmax.cpp
      29  "name": "inputTensor",   [in SoftmaxFixture()]

D | Neg.cpp
      29  "name": "inputTensor",   [in NegFixture()]

D | Exp.cpp
      29  "name": "inputTensor",   [in ExpFixture()]

D | Pad.cpp
      32  "name": "inputTensor",   [in PadFixture()]

D | Mean.cpp
      32  "name": "inputTensor",   [in MeanNoReduceFixture()]

/external/tensorflow/tensorflow/lite/java/src/main/java/org/tensorflow/lite/

D | NativeInterpreterWrapper.java
      378  Tensor inputTensor = inputTensors[index];   [in getInputTensor(), local]
      379  if (inputTensor == null) {   [in getInputTensor()]
      380  inputTensor =   [in getInputTensor()]
      384  return inputTensor;   [in getInputTensor()]

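The Java matches above are a lazy cache: getInputTensor() returns the wrapper stored at inputTensors[index], creating it on first access. Purely as a generic illustration of that caching pattern (written in C++ to match the other sketches here; TensorWrapper and InterpreterWrapper are invented names, not part of the TensorFlow Lite API):

#include <memory>
#include <stdexcept>
#include <vector>

// Illustrative stand-in for the per-index tensor wrapper object.
class TensorWrapper {
 public:
  explicit TensorWrapper(int index) : index_(index) {}
  int index() const { return index_; }

 private:
  int index_;
};

class InterpreterWrapper {
 public:
  explicit InterpreterWrapper(int inputCount) : inputTensors_(inputCount) {}

  // Mirrors the getInputTensor() logic: look up the cached slot, create the
  // wrapper lazily on first use, then return the cached instance.
  TensorWrapper& getInputTensor(int index) {
    if (index < 0 || index >= static_cast<int>(inputTensors_.size())) {
      throw std::out_of_range("invalid input tensor index");
    }
    std::unique_ptr<TensorWrapper>& slot = inputTensors_[index];
    if (slot == nullptr) {
      slot = std::make_unique<TensorWrapper>(index);  // created on first access
    }
    return *slot;
  }

 private:
  std::vector<std::unique_ptr<TensorWrapper>> inputTensors_;
};
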
/external/armnn/src/backends/cl/workloads/

D | ClQuantizedLstmWorkload.cpp
      104  …const arm_compute::ICLTensor& inputTensor = static_cast<IClTensorHandle*>(m_Data.m_Inputs[…   [in ClQuantizedLstmWorkload(), local]
      111  …m_QuantizedLstmLayer.configure(&inputTensor, m_InputToInputWeightsTensor.get(), m_InputToForgetWei…   [in ClQuantizedLstmWorkload()]

/external/armnn/src/armnn/

D | LoadedNetwork.cpp
      431  auto inputTensor = inputTensorPair.second;   [in WorkloadData(), local]
      434  …std::make_unique<ConstPassthroughCpuTensorHandle>(inputTensor.GetInfo(),inputTensor.GetMemoryArea(…   [in WorkloadData()]
      437  m_InputTensorPins.emplace_back(std::move(tensorHandle), inputTensor.GetInfo(), layerId);   [in WorkloadData()]