Home
last modified time | relevance | path

Searched refs:m_Data (Results 1 – 25 of 268) sorted by relevance

1 2 3 4 5 6 7 8 9 10 11

/external/armnn/src/backends/cl/workloads/
DClQLstmWorkload.cpp32 …BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo(… in ClQLstmWorkload()
35 … BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo()); in ClQLstmWorkload()
38 …BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo(… in ClQLstmWorkload()
41 …BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTen… in ClQLstmWorkload()
44 …BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorI… in ClQLstmWorkload()
47 …BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTen… in ClQLstmWorkload()
50 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo()); in ClQLstmWorkload()
53 BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo()); in ClQLstmWorkload()
56 BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo()); in ClQLstmWorkload()
59 if (m_Data.m_Parameters.m_PeepholeEnabled) in ClQLstmWorkload()
[all …]
DClLstmFloatWorkload.cpp38 …BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo(… in ClLstmFloatWorkload()
41 … BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo()); in ClLstmFloatWorkload()
44 …BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo(… in ClLstmFloatWorkload()
47 …BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTen… in ClLstmFloatWorkload()
50 …BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorI… in ClLstmFloatWorkload()
53 …BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTen… in ClLstmFloatWorkload()
56 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo()); in ClLstmFloatWorkload()
59 BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo()); in ClLstmFloatWorkload()
62 BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo()); in ClLstmFloatWorkload()
65 if (!m_Data.m_Parameters.m_CifgEnabled) in ClLstmFloatWorkload()
[all …]
DClQuantizedLstmWorkload.cpp70 …BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo()); in ClQuantizedLstmWorkload()
73 …BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo(… in ClQuantizedLstmWorkload()
76 … BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo()); in ClQuantizedLstmWorkload()
79 …BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo(… in ClQuantizedLstmWorkload()
82 …BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTenso… in ClQuantizedLstmWorkload()
85 …BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTen… in ClQuantizedLstmWorkload()
88 …BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorI… in ClQuantizedLstmWorkload()
91 …BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTen… in ClQuantizedLstmWorkload()
94 BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo()); in ClQuantizedLstmWorkload()
97 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo()); in ClQuantizedLstmWorkload()
[all …]
DClUnidirectionalSequenceLstmFloatWorkload.cpp43 …const arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor… in ClUnidirectionalSequenceLstmFloatWorkload()
44 … arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetTensor(); in ClUnidirectionalSequenceLstmFloatWorkload()
49 …arm_compute::DataType armComputeDataType = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetD… in ClUnidirectionalSequenceLstmFloatWorkload()
52 TensorShape inputLayerShape = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetShape(); in ClUnidirectionalSequenceLstmFloatWorkload()
53 TensorShape cellStateLayerShape = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetShape(); in ClUnidirectionalSequenceLstmFloatWorkload()
54 TensorShape outputLayerShape = static_cast<IClTensorHandle*>(m_Data.m_Outputs[2])->GetShape(); in ClUnidirectionalSequenceLstmFloatWorkload()
56 … unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1]; in ClUnidirectionalSequenceLstmFloatWorkload()
57 …unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0]; in ClUnidirectionalSequenceLstmFloatWorkload()
68 if (!m_Data.m_Parameters.m_TimeMajor) in ClUnidirectionalSequenceLstmFloatWorkload()
135 if (!m_Data.m_Parameters.m_TimeMajor) in ClUnidirectionalSequenceLstmFloatWorkload()
[all …]
DClBatchNormalizationFloatWorkload.cpp67 BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo()); in ClBatchNormalizationFloatWorkload()
70 BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo()); in ClBatchNormalizationFloatWorkload()
73 BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo()); in ClBatchNormalizationFloatWorkload()
76 BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo()); in ClBatchNormalizationFloatWorkload()
78 m_Data.ValidateInputsOutputs("ClBatchNormalizationFloatWorkload", 1, 1); in ClBatchNormalizationFloatWorkload()
80 arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in ClBatchNormalizationFloatWorkload()
81 … arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); in ClBatchNormalizationFloatWorkload()
83 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in ClBatchNormalizationFloatWorkload()
98 m_Data.m_Parameters.m_Eps, in ClBatchNormalizationFloatWorkload()
102 InitializeArmComputeClTensorData(*m_Mean, m_Data.m_Mean); in ClBatchNormalizationFloatWorkload()
[all …]
DClL2NormalizationFloatWorkload.cpp40 m_Data.ValidateInputsOutputs("ClL2NormalizationFloatWorkload", 1, 1); in ClL2NormalizationFloatWorkload()
42 arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in ClL2NormalizationFloatWorkload()
43 … arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); in ClL2NormalizationFloatWorkload()
45 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in ClL2NormalizationFloatWorkload()
49 int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0; in ClL2NormalizationFloatWorkload()
53 m_Layer.configure(clCompileContext, &input, &output, axis, m_Data.m_Parameters.m_Eps); in ClL2NormalizationFloatWorkload()
65 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
66 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
74 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
82 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceOutputTensorHandle()
[all …]
DClNormalizationFloatWorkload.cpp42 m_Data.ValidateInputsOutputs("ClNormalizationFloatWorkload", 1, 1); in ClNormalizationFloatWorkload()
44 arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in ClNormalizationFloatWorkload()
45 … arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); in ClNormalizationFloatWorkload()
47 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in ClNormalizationFloatWorkload()
51 …malizationLayerInfo normalizationInfo = BuildArmComputeNormalizationLayerInfo(m_Data.m_Parameters); in ClNormalizationFloatWorkload()
67 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
68 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
76 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
84 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceOutputTensorHandle()
85 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceOutputTensorHandle()
[all …]
DClConvertFp16ToFp32Workload.cpp23 this->m_Data.ValidateInputsOutputs("ClConvertFp16ToFp32Workload", 1, 1); in ClConvertFp16ToFp32Workload()
25 …arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor… in ClConvertFp16ToFp32Workload()
26 …arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTens… in ClConvertFp16ToFp32Workload()
66 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
67 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
75 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
83 ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot]; in ReplaceOutputTensorHandle()
84 this->m_Data.m_Outputs[slot] = tensorHandle; in ReplaceOutputTensorHandle()
92 this->m_Data.m_Outputs[slot] = backupHandle; in ReplaceOutputTensorHandle()
99 arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in Reconfigure()
[all …]
DClConvertFp32ToFp16Workload.cpp23 this->m_Data.ValidateInputsOutputs("ClConvertFp32ToFp16Workload", 1, 1); in ClConvertFp32ToFp16Workload()
25 …arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(this->m_Data.m_Inputs[0])->GetTensor… in ClConvertFp32ToFp16Workload()
26 …arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(this->m_Data.m_Outputs[0])->GetTens… in ClConvertFp32ToFp16Workload()
66 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
67 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
75 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
83 ITensorHandle* backupHandle = this->m_Data.m_Outputs[slot]; in ReplaceOutputTensorHandle()
84 this->m_Data.m_Outputs[slot] = tensorHandle; in ReplaceOutputTensorHandle()
92 this->m_Data.m_Outputs[slot] = backupHandle; in ReplaceOutputTensorHandle()
99 arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in Reconfigure()
[all …]
DClFloorFloatWorkload.cpp28 m_Data.ValidateInputsOutputs("ClFloorFloatWorkload", 1, 1); in ClFloorFloatWorkload()
30 arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(); in ClFloorFloatWorkload()
31 … arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor(); in ClFloorFloatWorkload()
46 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
47 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
55 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
63 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceOutputTensorHandle()
64 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceOutputTensorHandle()
72 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceOutputTensorHandle()
/external/armnn/src/backends/neon/workloads/
DNeonQLstmWorkload.cpp30 …BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo(… in NeonQLstmWorkload()
33 … BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo()); in NeonQLstmWorkload()
36 …BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo(… in NeonQLstmWorkload()
39 …BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTen… in NeonQLstmWorkload()
42 …BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorI… in NeonQLstmWorkload()
45 …BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTen… in NeonQLstmWorkload()
48 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo()); in NeonQLstmWorkload()
51 BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo()); in NeonQLstmWorkload()
54 BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo()); in NeonQLstmWorkload()
57 if (m_Data.m_Parameters.m_PeepholeEnabled) in NeonQLstmWorkload()
[all …]
DNeonLstmFloatWorkload.cpp33 …BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo(… in NeonLstmFloatWorkload()
36 … BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo()); in NeonLstmFloatWorkload()
39 …BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo(… in NeonLstmFloatWorkload()
42 …BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTen… in NeonLstmFloatWorkload()
45 …BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorI… in NeonLstmFloatWorkload()
48 …BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTen… in NeonLstmFloatWorkload()
51 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo()); in NeonLstmFloatWorkload()
54 BuildArmComputeTensor(*m_CellBiasTensor, m_Data.m_CellBias->GetTensorInfo()); in NeonLstmFloatWorkload()
57 BuildArmComputeTensor(*m_OutputGateBiasTensor, m_Data.m_OutputGateBias->GetTensorInfo()); in NeonLstmFloatWorkload()
60 if (!m_Data.m_Parameters.m_CifgEnabled) in NeonLstmFloatWorkload()
[all …]
DNeonQuantizedLstmWorkload.cpp23 …BuildArmComputeTensor(*m_InputToInputWeightsTensor, m_Data.m_InputToInputWeights->GetTensorInfo()); in NeonQuantizedLstmWorkload()
26 …BuildArmComputeTensor(*m_InputToForgetWeightsTensor, m_Data.m_InputToForgetWeights->GetTensorInfo(… in NeonQuantizedLstmWorkload()
29 … BuildArmComputeTensor(*m_InputToCellWeightsTensor, m_Data.m_InputToCellWeights->GetTensorInfo()); in NeonQuantizedLstmWorkload()
32 …BuildArmComputeTensor(*m_InputToOutputWeightsTensor, m_Data.m_InputToOutputWeights->GetTensorInfo(… in NeonQuantizedLstmWorkload()
35 …BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, m_Data.m_RecurrentToInputWeights->GetTenso… in NeonQuantizedLstmWorkload()
38 …BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, m_Data.m_RecurrentToForgetWeights->GetTen… in NeonQuantizedLstmWorkload()
41 …BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, m_Data.m_RecurrentToCellWeights->GetTensorI… in NeonQuantizedLstmWorkload()
44 …BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, m_Data.m_RecurrentToOutputWeights->GetTen… in NeonQuantizedLstmWorkload()
47 BuildArmComputeTensor(*m_InputGateBiasTensor, m_Data.m_InputGateBias->GetTensorInfo()); in NeonQuantizedLstmWorkload()
50 BuildArmComputeTensor(*m_ForgetGateBiasTensor, m_Data.m_ForgetGateBias->GetTensorInfo()); in NeonQuantizedLstmWorkload()
[all …]
DNeonUnidirectionalSequenceLstmFloatWorkload.cpp41 …const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor(… in NeonUnidirectionalSequenceLstmFloatWorkload()
42 arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTensor(); in NeonUnidirectionalSequenceLstmFloatWorkload()
47 …arm_compute::DataType armComputeDataType = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->Get… in NeonUnidirectionalSequenceLstmFloatWorkload()
50 TensorShape inputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetShape(); in NeonUnidirectionalSequenceLstmFloatWorkload()
51 … TensorShape cellStateLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->GetShape(); in NeonUnidirectionalSequenceLstmFloatWorkload()
52 TensorShape outputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetShape(); in NeonUnidirectionalSequenceLstmFloatWorkload()
54 … unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1]; in NeonUnidirectionalSequenceLstmFloatWorkload()
55 …unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0]; in NeonUnidirectionalSequenceLstmFloatWorkload()
66 if (!m_Data.m_Parameters.m_TimeMajor) in NeonUnidirectionalSequenceLstmFloatWorkload()
134 if (!m_Data.m_Parameters.m_TimeMajor) in NeonUnidirectionalSequenceLstmFloatWorkload()
[all …]
DNeonUnidirectionalSequenceLstmWorkload.cpp43 …const arm_compute::ITensor& input = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->Ge… in NeonUnidirectionalSequenceLstmWorkload()
44 …arm_compute::ITensor& outputStateIn = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[1])->Ge… in NeonUnidirectionalSequenceLstmWorkload()
45 …const arm_compute::ITensor& cellStateIn = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[2])->Ge… in NeonUnidirectionalSequenceLstmWorkload()
47 …arm_compute::ITensor& outputStateOut = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonUnidirectionalSequenceLstmWorkload()
48 …arm_compute::ITensor& cellStateOut = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[1])->GetTen… in NeonUnidirectionalSequenceLstmWorkload()
49 …arm_compute::ITensor& output = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetTen… in NeonUnidirectionalSequenceLstmWorkload()
54 TensorShape inputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetShape(); in NeonUnidirectionalSequenceLstmWorkload()
55 TensorShape outputLayerShape = static_cast<IAclTensorHandle*>(m_Data.m_Outputs[2])->GetShape(); in NeonUnidirectionalSequenceLstmWorkload()
57 … unsigned int maxTime = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1]; in NeonUnidirectionalSequenceLstmWorkload()
58 …unsigned int batchSize = m_Data.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0]; in NeonUnidirectionalSequenceLstmWorkload()
[all …]
DNeonNormalizationFloatWorkload.cpp72 m_Data.ValidateInputsOutputs("NeonNormalizationFloatWorkload", 1, 1); in NeonNormalizationFloatWorkload()
74 …if (!IsNeonNormalizationDescriptorSupported(m_Data.m_Parameters, Optional<std::string&>(reasonIfUn… in NeonNormalizationFloatWorkload()
88 …arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTenso… in NeonNormalizationFloatWorkload()
89 …arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonNormalizationFloatWorkload()
90 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in NeonNormalizationFloatWorkload()
95 ConvertNormalizationAlgorithmChannelToAclNormType(m_Data.m_Parameters.m_NormChannelType); in NeonNormalizationFloatWorkload()
97 m_Data.m_Parameters.m_NormSize, in NeonNormalizationFloatWorkload()
98 m_Data.m_Parameters.m_Alpha, in NeonNormalizationFloatWorkload()
99 m_Data.m_Parameters.m_Beta, in NeonNormalizationFloatWorkload()
100 m_Data.m_Parameters.m_K, in NeonNormalizationFloatWorkload()
[all …]
DNeonL2NormalizationFloatWorkload.cpp41 m_Data.ValidateInputsOutputs("NeonL2NormalizationFloatWorkload", 1, 1); in NeonL2NormalizationFloatWorkload()
43 …arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTenso… in NeonL2NormalizationFloatWorkload()
44 …arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonL2NormalizationFloatWorkload()
46 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in NeonL2NormalizationFloatWorkload()
50 int axis = (m_Data.m_Parameters.m_DataLayout == DataLayout::NCHW) ? 2 : 0; in NeonL2NormalizationFloatWorkload()
53 layer->configure(&input, &output, axis, m_Data.m_Parameters.m_Eps); in NeonL2NormalizationFloatWorkload()
65 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
66 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
74 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
82 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceOutputTensorHandle()
[all …]
DNeonBatchNormalizationWorkload.cpp69 m_Data.ValidateInputsOutputs("NeonBatchNormalizationWorkload", 1, 1); in NeonBatchNormalizationWorkload()
71 …arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTenso… in NeonBatchNormalizationWorkload()
72 …arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonBatchNormalizationWorkload()
74 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in NeonBatchNormalizationWorkload()
79 BuildArmComputeTensor(*m_Mean, m_Data.m_Mean->GetTensorInfo()); in NeonBatchNormalizationWorkload()
82 BuildArmComputeTensor(*m_Variance, m_Data.m_Variance->GetTensorInfo()); in NeonBatchNormalizationWorkload()
85 BuildArmComputeTensor(*m_Gamma, m_Data.m_Gamma->GetTensorInfo()); in NeonBatchNormalizationWorkload()
88 BuildArmComputeTensor(*m_Beta, m_Data.m_Beta->GetTensorInfo()); in NeonBatchNormalizationWorkload()
99 m_Data.m_Parameters.m_Eps, in NeonBatchNormalizationWorkload()
103 InitializeArmComputeTensorData(*m_Mean, m_Data.m_Mean); in NeonBatchNormalizationWorkload()
[all …]
DNeonTransposeConvolution2dWorkload.cpp60 m_Data.ValidateInputsOutputs("NeonTransposeConvolution2dWorkload", 1, 1); in NeonTransposeConvolution2dWorkload()
62 …arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTens… in NeonTransposeConvolution2dWorkload()
63 …arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonTransposeConvolution2dWorkload()
65 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in NeonTransposeConvolution2dWorkload()
70 …BuildArmComputeTensor(*m_KernelTensor, m_Data.m_Weight->GetTensorInfo(), m_Data.m_Parameters.m_Dat… in NeonTransposeConvolution2dWorkload()
72 if (m_Data.m_Parameters.m_BiasEnabled) in NeonTransposeConvolution2dWorkload()
75 …BuildArmComputeTensor(*m_BiasTensor, m_Data.m_Bias->GetTensorInfo(), m_Data.m_Parameters.m_DataLay… in NeonTransposeConvolution2dWorkload()
78 arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters); in NeonTransposeConvolution2dWorkload()
102 InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight); in NeonTransposeConvolution2dWorkload()
104 if (m_Data.m_Parameters.m_BiasEnabled) in NeonTransposeConvolution2dWorkload()
[all …]
DNeonFloorFloatWorkload.cpp20 m_Data.ValidateInputsOutputs("NeonFloorFloatWorkload", 1, 1); in NeonFloorFloatWorkload()
22 …arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTenso… in NeonFloorFloatWorkload()
23 …arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonFloorFloatWorkload()
38 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceInputTensorHandle()
39 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceInputTensorHandle()
47 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceInputTensorHandle()
55 ITensorHandle* backupHandle = this->m_Data.m_Inputs[slot]; in ReplaceOutputTensorHandle()
56 this->m_Data.m_Inputs[slot] = tensorHandle; in ReplaceOutputTensorHandle()
64 this->m_Data.m_Inputs[slot] = backupHandle; in ReplaceOutputTensorHandle()
DNeonConvolution2dWorkload.cpp82 uint32_t numInputs = m_Data.m_Parameters.m_BiasEnabled ? 3: 2; in NeonConvolution2dWorkload()
83 m_Data.ValidateInputsOutputs("NeonConvolution2dWorkload", numInputs, 1); in NeonConvolution2dWorkload()
85 …arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTenso… in NeonConvolution2dWorkload()
86 …arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTen… in NeonConvolution2dWorkload()
88 arm_compute::DataLayout aclDataLayout = ConvertDataLayout(m_Data.m_Parameters.m_DataLayout); in NeonConvolution2dWorkload()
93 …BuildArmComputeTensor(*m_KernelTensor, info.m_InputTensorInfos[1], m_Data.m_Parameters.m_DataLayou… in NeonConvolution2dWorkload()
94 if (m_Data.m_Parameters.m_BiasEnabled) in NeonConvolution2dWorkload()
97 …BuildArmComputeTensor(*m_BiasTensor, info.m_InputTensorInfos[2], m_Data.m_Parameters.m_DataLayout); in NeonConvolution2dWorkload()
100 arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters); in NeonConvolution2dWorkload()
102 … const arm_compute::Size2D aclDilationInfo = BuildArmComputeSize2D(m_Data.m_Parameters.m_DilationX, in NeonConvolution2dWorkload()
[all …]
/external/armnn/src/backends/reference/workloads/
DRefConvolution3dWorkload.cpp37 Execute(m_Data.m_Inputs, m_Data.m_Outputs); in Execute()
42 …kingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); in ExecuteAsync()
60 if (m_Data.m_Parameters.m_BiasEnabled) in Execute()
66 *filterDecoder, m_Data.m_Parameters.m_BiasEnabled, biasDecoder.get(), in Execute()
67 m_Data.m_Parameters.m_DataLayout, in Execute()
68m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft, m_Data.m_Parameters.m_PadFront, in Execute()
69m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, m_Data.m_Parameters.m_StrideZ, in Execute()
70m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY, m_Data.m_Parameters.m_DilationZ); in Execute()
DRefConvolution2dWorkload.cpp35 Execute(m_Data.m_Inputs, m_Data.m_Outputs); in Execute()
40 …kingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); in ExecuteAsync()
54 if (m_Data.m_Parameters.m_BiasEnabled) in Execute()
60 *weightsDecoder, m_Data.m_Parameters.m_BiasEnabled, biasDecoder.get(), in Execute()
61m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft, in Execute()
62 m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, in Execute()
63 m_Data.m_Parameters.m_DilationX, m_Data.m_Parameters.m_DilationY); in Execute()
DRefDepthwiseConvolution2dWorkload.cpp41 Execute(m_Data.m_Inputs, m_Data.m_Outputs); in Execute()
46 …kingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); in ExecuteAsync()
63 if (m_Data.m_Parameters.m_BiasEnabled) in Execute()
69 filterShape, *filterDecoder, m_Data.m_Parameters.m_BiasEnabled, biasDecoder.get(), in Execute()
70m_Data.m_Parameters.m_DataLayout, m_Data.m_Parameters.m_PadTop, m_Data.m_Parameters.m_PadLeft, in Execute()
71 m_Data.m_Parameters.m_StrideX, m_Data.m_Parameters.m_StrideY, in Execute()
72 m_Data.m_Parameters.m_DilationX, in Execute()
73 m_Data.m_Parameters.m_DilationY, true); in Execute()
DRefDebugWorkload.cpp20 Execute(m_Data.m_Inputs); in Execute()
26 …kingMemDescriptor* workingMemDescriptor = static_cast<WorkingMemDescriptor*>(executionData.m_Data); in ExecuteAsync()
39 const T* inputData = GetInputTensorData<T>(0, m_Data); in Execute()
40 T* outputData = GetOutputTensorData<T>(0, m_Data); in Execute()
44 m_Callback(m_Data.m_Guid, m_Data.m_SlotIndex, inputs[0]); in Execute()
48 …Debug(inputInfo, inputData, m_Data.m_Guid, m_Data.m_LayerName, m_Data.m_SlotIndex, m_Data.m_LayerO… in Execute()

1 2 3 4 5 6 7 8 9 10 11