//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/Descriptors.hpp>
#include <armnn/LstmParams.hpp>
#include <armnn/backends/Workload.hpp>
#include <armnn/backends/WorkloadData.hpp>

#include <arm_compute/runtime/CL/functions/CLLSTMLayer.h>

namespace armnn
{

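// Float32 LSTM workload for the GPU/CL backend: wraps the Arm Compute Library
// CLLSTMLayer and runs the LSTM cell on the OpenCL device.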
class ClLstmFloatWorkload : public FloatWorkload<LstmQueueDescriptor>
{
public:
    ClLstmFloatWorkload(const LstmQueueDescriptor& descriptor,
                        const WorkloadInfo& info,
                        const arm_compute::CLCompileContext& clCompileContext);
    void Execute() const override;

    // Replace input tensor handle with the given TensorHandle
    void ReplaceInputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

    // Replace output tensor handle with the given TensorHandle
    void ReplaceOutputTensorHandle(ITensorHandle* tensorHandle, unsigned int slot) override;

private:
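    // Underlying Compute Library LSTM function; mutable so the const Execute() can run it.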
    mutable arm_compute::CLLSTMLayer m_LstmLayer;
    // Re-applies the layer configuration, e.g. after an input/output tensor handle has been replaced.
    virtual void Reconfigure();

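    // Compute Library tensors that stage the LSTM weights and biases while the layer is configured;
    // those no longer required are released by FreeUnusedTensors().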
    std::unique_ptr<arm_compute::CLTensor> m_InputToInputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputToForgetWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputToCellWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputToOutputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToInputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToForgetWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToCellWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_RecurrentToOutputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellToInputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellToForgetWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellToOutputWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputGateBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ForgetGateBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_OutputGateBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ProjectionWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ProjectionBiasTensor;
    std::unique_ptr<arm_compute::CLTensor> m_InputLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_ForgetLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_CellLayerNormWeightsTensor;
    std::unique_ptr<arm_compute::CLTensor> m_OutputLayerNormWeightsTensor;

    std::unique_ptr<arm_compute::CLTensor> m_ScratchBuffer;

    void FreeUnusedTensors();
};

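// Reports whether the Compute Library CLLSTMLayer supports an LSTM layer with the given
// tensor infos and descriptor, without having to create the workload.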
arm_compute::Status ClLstmFloatWorkloadValidate(const TensorInfo& input, const TensorInfo& outputStateIn,
                                                const TensorInfo& cellStateIn, const TensorInfo& scratchBuffer,
                                                const TensorInfo& outputStateOut, const TensorInfo& cellStateOut,
                                                const TensorInfo& output, const LstmDescriptor& descriptor,
                                                const LstmInputParamsInfo& paramsInfo);

} // namespace armnn