//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClDivisionFloatWorkload.hpp"

#include <aclCommon/ArmComputeUtils.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <cl/ClTensorHandle.hpp>

#include "ClWorkloadUtils.hpp"

namespace armnn
{

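// Asks the Compute Library whether CLArithmeticDivision supports the given
// input/output tensor infos and the optional fused activation.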
arm_compute::Status ClDivisionWorkloadValidate(const TensorInfo& input0,
                                               const TensorInfo& input1,
                                               const TensorInfo& output,
                                               const ActivationDescriptor* activationDescriptor)
{
    const arm_compute::TensorInfo aclInput1 = armcomputetensorutils::BuildArmComputeTensorInfo(input0);
    const arm_compute::TensorInfo aclInput2 = armcomputetensorutils::BuildArmComputeTensorInfo(input1);
    const arm_compute::TensorInfo aclOutput = armcomputetensorutils::BuildArmComputeTensorInfo(output);

    const arm_compute::ActivationLayerInfo activationInfo = ConvertActivationDescriptorToAclActivationLayerInfo(
            activationDescriptor);

    return arm_compute::CLArithmeticDivision::validate(&aclInput1, &aclInput2, &aclOutput, activationInfo);
}

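// Configures the underlying CLArithmeticDivision function with the workload's
// input/output tensors and any fused activation carried in the descriptor.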
ClDivisionFloatWorkload::ClDivisionFloatWorkload(const DivisionQueueDescriptor& descriptor,
                                                 const WorkloadInfo& info)
    : FloatWorkload<DivisionQueueDescriptor>(descriptor, info)
{
    m_Data.ValidateInputsOutputs("ClDivisionFloatWorkload", 2, 1);

    arm_compute::ICLTensor& input0 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ICLTensor& input1 = static_cast<IClTensorHandle*>(m_Data.m_Inputs[1])->GetTensor();
    arm_compute::ICLTensor& output = static_cast<IClTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    const arm_compute::ActivationLayerInfo activationInfo = ConvertAdditionalInfoToAclActivationLayerInfo(descriptor);

    m_ArithmeticDivision.configure(&input0, &input1, &output, activationInfo);
}

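// Runs the configured CLArithmeticDivision function inside a CL profiling scope.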
void ClDivisionFloatWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_CL("ClDivisionFloatWorkload_Execute");
    RunClFunction(m_ArithmeticDivision, CHECK_LOCATION());
}

} // namespace armnn