//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClFullyConnectedWorkload.hpp"
#include <cl/ClTensorHandle.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <aclCommon/ArmComputeUtils.hpp>
#include <cl/ClLayerSupport.hpp>

#include "ClWorkloadUtils.hpp"

namespace armnn
{
using namespace armcomputetensorutils;
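
// Checks whether Compute Library's CL fully connected layer can handle the given
// input/output/weight/bias tensor infos, descriptor and optional fused activation.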
18
arm_compute::Status ClFullyConnectedWorkloadValidate(const TensorInfo& input,
                                                     const TensorInfo& output,
                                                     const TensorInfo& weights,
                                                     const TensorInfo& biases,
                                                     const FullyConnectedDescriptor& descriptor,
                                                     const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInput = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutput = BuildArmComputeTensorInfo(output);
    const arm_compute::TensorInfo aclWeights = BuildArmComputeTensorInfo(weights);

    arm_compute::TensorInfo aclBiases;
    arm_compute::TensorInfo* optionalAclBiases = nullptr;
    if (descriptor.m_BiasEnabled)
    {
        aclBiases = BuildArmComputeTensorInfo(biases);
        optionalAclBiases = &aclBiases;
    }

    const arm_compute::FullyConnectedLayerInfo fullyConnectedLayerInfo =
        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor, activationDescriptor);

    return arm_compute::CLFullyConnectedLayer::validate(&aclInput,
                                                        &aclWeights,
                                                        optionalAclBiases,
                                                        &aclOutput,
                                                        fullyConnectedLayerInfo);
}
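
// Builds CL tensors for the constant weights (and biases, if enabled), configures the
// Compute Library fully connected function, then copies the constant data into them.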
ClFullyConnectedWorkload::ClFullyConnectedWorkload(const FullyConnectedQueueDescriptor& descriptor,
    const WorkloadInfo& info, std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager)
    : BaseWorkload<FullyConnectedQueueDescriptor>(descriptor, info)
    , m_FullyConnectedLayer(memoryManager)
{
    m_WeightsTensor = std::make_unique<arm_compute::CLTensor>();
    BuildArmComputeTensor(*m_WeightsTensor, m_Data.m_Weight->GetTensorInfo());

    if (m_Data.m_Parameters.m_BiasEnabled)
    {
        m_BiasesTensor = std::make_unique<arm_compute::CLTensor>();
        BuildArmComputeTensor(*m_BiasesTensor, m_Data.m_Bias->GetTensorInfo());
    }

    m_Data.ValidateInputsOutputs("ClFullyConnectedWorkload", 1, 1);

    arm_compute::ICLTensor& input = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    arm_compute::FullyConnectedLayerInfo fc_info =
        ConvertFullyConnectedDescriptorToAclFullyConnectedLayerInfo(descriptor.m_Parameters, activationInfo);

    m_FullyConnectedLayer.configure(&input, m_WeightsTensor.get(), m_BiasesTensor.get(), &output, fc_info);

    InitializeArmComputeClTensorData(*m_WeightsTensor, m_Data.m_Weight);

    if (m_BiasesTensor)
    {
        InitializeArmComputeClTensorData(*m_BiasesTensor, m_Data.m_Bias);
    }

    // Force Compute Library to perform the necessary copying and reshaping, after which
    // delete all the input tensors that will no longer be needed.
    m_FullyConnectedLayer.prepare();
    FreeUnusedTensors();
}
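
// Runs the configured CL fully connected function inside a scoped CL profiling event.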
void ClFullyConnectedWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClFullyConnectedWorkload_Execute");
    RunClFunction(m_FullyConnectedLayer, CHECK_LOCATION());
}
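
// Releases the local weight and bias CL tensors if the prepared Compute Library
// function no longer needs them.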
void ClFullyConnectedWorkload::FreeUnusedTensors()
{
    FreeTensorIfUnused(m_WeightsTensor);
    FreeTensorIfUnused(m_BiasesTensor);
}

} //namespace armnn