//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NeonLogicalNotWorkload.hpp"

#include "NeonWorkloadUtils.hpp"

#include <aclCommon/ArmComputeTensorHandle.hpp>
#include <aclCommon/ArmComputeTensorUtils.hpp>

#include <armnn/utility/PolymorphicDowncast.hpp>


namespace armnn
{

arm_compute::Status NeonLogicalNotWorkloadValidate(const TensorInfo& input,
                                                   const TensorInfo& output)
{
    // Translate the Arm NN tensor descriptions into their Compute Library
    // equivalents and ask NELogicalNot whether this configuration is supported.
    const arm_compute::TensorInfo aclInputInfo  = BuildArmComputeTensorInfo(input);
    const arm_compute::TensorInfo aclOutputInfo = BuildArmComputeTensorInfo(output);

    const arm_compute::Status aclStatus = arm_compute::NELogicalNot::validate(&aclInputInfo,
                                                                              &aclOutputInfo);
    return aclStatus;
}

NeonLogicalNotWorkload::NeonLogicalNotWorkload(const ElementwiseUnaryQueueDescriptor& descriptor,
                                               const WorkloadInfo& info)
    : BaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
{
    // Logical NOT is a unary operation: exactly one input and one output tensor.
    m_Data.ValidateInputsOutputs("NeonLogicalNotWorkload", 1, 1);

    arm_compute::ITensor& input  = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Inputs[0])->GetTensor();
    arm_compute::ITensor& output = PolymorphicDowncast<IAclTensorHandle*>(m_Data.m_Outputs[0])->GetTensor();

    m_LogicalNotLayer.configure(&input, &output);
}

void NeonLogicalNotWorkload::Execute() const
{
    ARMNN_SCOPED_PROFILING_EVENT_NEON("NeonLogicalNotWorkload_Execute");
    m_LogicalNotLayer.run();
}

} // namespace armnn
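
// Illustrative sketch (not part of the upstream Arm NN source): one way a
// caller could consult NeonLogicalNotWorkloadValidate() before constructing
// the workload, mirroring the validate-then-configure-then-run pattern this
// file follows. The helper name and the reason-string parameter below are
// assumptions made for this example only.
#include <string>

namespace
{

// Hypothetical helper: returns true when the Compute Library reports that a
// logical NOT with these tensor infos can run on the Neon (CpuAcc) backend.
bool IsLogicalNotSupportedOnNeon(const armnn::TensorInfo& input,
                                 const armnn::TensorInfo& output,
                                 std::string* reasonIfUnsupported = nullptr)
{
    const arm_compute::Status status = armnn::NeonLogicalNotWorkloadValidate(input, output);
    const bool supported = (status.error_code() == arm_compute::ErrorCode::OK);
    if (!supported && reasonIfUnsupported)
    {
        // arm_compute::Status carries a human-readable description of the failure.
        *reasonIfUnsupported = status.error_description();
    }
    return supported;
}

} // anonymous namespace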