//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ClDequantizeWorkload.hpp"
#include "ClWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorUtils.hpp>
#include <armnn/utility/PolymorphicDowncast.hpp>
#include <backendsCommon/CpuTensorHandle.hpp>

#include <arm_compute/core/Types.h>

#include <cl/ClLayerSupport.hpp>
#include <cl/ClTensorHandle.hpp>

namespace armnn
{
using namespace armcomputetensorutils;

// Converts the Arm NN tensor infos to Compute Library descriptors and asks
// CLDequantizationLayer whether it can handle this input/output combination.
arm_compute::Status ClDequantizeWorkloadValidate(const TensorInfo& input, const TensorInfo& output)
{
    const arm_compute::TensorInfo aclInputInfo = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    return arm_compute::CLDequantizationLayer::validate(&aclInputInfo, &aclOutputInfo);
}

ClDequantizeWorkload::ClDequantizeWorkload(const DequantizeQueueDescriptor& descriptor,
                                           const WorkloadInfo& workloadInfo)
    : BaseWorkload<DequantizeQueueDescriptor>(descriptor, workloadInfo)
{
    m_Data.ValidateInputsOutputs("ClDequantizeWorkload", 1, 1);

    // Retrieve the underlying CL tensors from the Arm NN tensor handles.
    arm_compute::ICLTensor& input = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
            m_Data.m_Inputs[0])->GetTensor();

    arm_compute::ICLTensor& output = armnn::PolymorphicPointerDowncast<IClTensorHandle>(
            m_Data.m_Outputs[0])->GetTensor();

    // Configure the Compute Library dequantization function and let it perform
    // any one-off setup work ahead of the first run().
    m_Layer.reset(new arm_compute::CLDequantizationLayer());
    m_Layer->configure(&input, &output);
    m_Layer->prepare();
}

void ClDequantizeWorkload::Execute() const
{
    if (m_Layer)
    {
        // Run the CL dequantization under a profiling scope.
        ARMNN_SCOPED_PROFILING_EVENT_CL("ClDequantizeWorkload_Execute");
        m_Layer->run();
    }
}

} // namespace armnn