• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/backends/Workload.hpp>

#include <arm_compute/core/Error.h>
#include <arm_compute/runtime/IFunction.h>
#include <arm_compute/runtime/MemoryManagerOnDemand.h>

#include <memory>
namespace armnn
{

19 arm_compute::Status NeonL2NormalizationWorkloadValidate(const TensorInfo& input,
20                                                         const TensorInfo& output,
21                                                         const L2NormalizationDescriptor& descriptor);
22 
23 class NeonL2NormalizationFloatWorkload : public FloatWorkload<L2NormalizationQueueDescriptor>
24 {
25 public:
26     NeonL2NormalizationFloatWorkload(const L2NormalizationQueueDescriptor& descriptor, const WorkloadInfo& info,
27                                      std::shared_ptr<arm_compute::MemoryManagerOnDemand>& memoryManager);
28     virtual void Execute() const override;
29     // Replace input tensor handle with the given TensorHandle
30     void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
31 
32     // Replace output tensor handle with the given TensorHandle
33     void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;
34 private:
35     std::unique_ptr<arm_compute::IFunction> m_Layer;
36     virtual void Reconfigure();
37 };

} // namespace armnn