1 //
2 // Copyright © 2017 Arm Ltd. All rights reserved.
3 // SPDX-License-Identifier: MIT
4 //
5
6 #include "NeonFullyConnectedWorkload.hpp"
7
8 #include "NeonWorkloadUtils.hpp"
9
10 #include <aclCommon/ArmComputeTensorUtils.hpp>
11 #include <aclCommon/ArmComputeUtils.hpp>
12
13 #include <armnn/utility/PolymorphicDowncast.hpp>
14
15 #include <backendsCommon/CpuTensorHandle.hpp>
16
17 #include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
18
19 namespace armnn
20 {
21 using namespace armcomputetensorutils;
22
NeonFullyConnectedWorkloadValidate(const TensorInfo & input,const TensorInfo & output,const TensorInfo & weights,const TensorInfo & biases,const FullyConnectedDescriptor & descriptor,const ActivationDescriptor * activationDescriptor)23 arm_compute::Status NeonFullyConnectedWorkloadValidate(const TensorInfo& input,
24 const TensorInfo& output,
25 const TensorInfo& weights,
26 const TensorInfo& biases,
27 const FullyConnectedDescriptor& descriptor,
28 const ActivationDescriptor* activationDescriptor)
29 {
30 if (activationDescriptor)
31 {
32 std::vector<ActivationFunction> activations = {ActivationFunction::ReLu, ActivationFunction::BoundedReLu};
33 if (std::find(activations.begin(), activations.end(), activationDescriptor->m_Function) == activations.end())
34 {
35 return arm_compute::Status{
36 arm_compute::ErrorCode::RUNTIME_ERROR, "NeonFullyConnectedWorkload :Unsupported Activation Function"};
37 }
38 }
39
40 const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
41 const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
42 const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);
43
44 arm_compute::TensorInfo aclBiases;
45 arm_compute::TensorInfo *optionalAclBiases = nullptr;
46 if (descriptor.m_BiasEnabled)
47 {
48 aclBiases = BuildArmComputeTensorInfo(biases);
49 optionalAclBiases = &aclBiases;
50 }
51
52 const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
53 ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);
54
55 return arm_compute::NEFullyConnectedLayer::validate(&aclInput,
56 &aclWeights,
57 optionalAclBiases,
58 &aclOutput,
59 fullyConnectedLayerInfo);
60 }
61
NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor & descriptor,const WorkloadInfo & info,std::shared_ptr<arm_compute::MemoryManagerOnDemand> & memoryManager)62 NeonFullyConnectedWorkload::NeonFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
63 const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
64 : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
65 {
66 m_Data.ValidateInputsOutputs("NeonFullyConnectedWorkload", 1, 1);
67
68 arm_compute::ITensor& input = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
69 arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();
70
71 m_WeightsTensor = std::make_unique<arm_compute::Tensor>();
72 BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());
73
74 if (m_Data.m_Parameters.m_BiasEnabled)
75 {
76 m_BiasesTensor = std::make_unique<arm_compute::Tensor>();
77 BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
78 }
79
80 const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);
81
82 arm_compute::FullyConnectedLayerInfo fc_info =
83 ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);
84
85 auto layer = std::make_unique<arm_compute::NEFullyConnectedLayer>(memoryManager);
86 layer->configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);
87 m_FullyConnectedLayer.reset(layer.release());
88
89 // Allocate
90 if (m_Data.m_Weight->GetTensorInfo().GetDataType() == DataType::QAsymmU8)
91 {
92 InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
93 }
94 else
95 {
96 InitializeArmComputeTensorData(*m_WeightsTensor, m_Data.m_Weight);
97 }
98
99 if (m_BiasesTensor)
100 {
101 if (m_Data.m_Bias->GetTensorInfo().GetDataType() == DataType::Signed32)
102 {
103 InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
104 }
105 else
106 {
107 InitializeArmComputeTensorData(*m_BiasesTensor, m_Data.m_Bias);
108 }
109 }
110
111 // Force Compute Library to perform the necessary copying and reshaping, after which
112 // delete all the input tensors that will no longer be needed
113 m_FullyConnectedLayer->prepare();
114 FreeUnusedTensors();
115 }
116
/// Runs the pre-configured ACL fully connected function on the tensors bound
/// at construction time, wrapped in a named NEON profiling event.
void NeonFullyConnectedWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonFullyConnectedWorkload_Execute");
    m_FullyConnectedLayer->run();
}
122
/// Releases the staging weight/bias tensors once they are no longer needed
/// (called after prepare(), by which point ACL holds its own copies — see the
/// FreeTensorIfUnused helper for the exact release condition).
void NeonFullyConnectedWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_WeightsTensor);
    FreeTensorIfUnused(m_BiasesTensor);
}
128
129 } //namespace armnn
130