• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright 2021-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NMS_WITH_MASK_CPU_KERNEL_H_
17 #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NMS_WITH_MASK_CPU_KERNEL_H_
18 #include <map>
19 #include <utility>
20 #include <vector>
21 #include "plugin/device/cpu/kernel/cpu_kernel.h"
22 #include "plugin/factory/ms_factory.h"
23 
24 namespace mindspore {
25 namespace kernel {
// Column indices into a single box row of width box_size_ (= 5):
// presumably [x0, y0, x1, y1, score] — the two box corners followed by the
// confidence score used for sorting/suppression. TODO confirm against the op spec.
constexpr int X0 = 0;
constexpr int Y0 = 1;
constexpr int X1 = 2;
constexpr int Y1 = 3;
constexpr int SCORE = 4;
// Expected tensor counts for this kernel: one input, three outputs.
constexpr size_t INPUT_NUM = 1;
constexpr size_t OUTPUT_NUM = 3;
33 class NMSWithMaskCpuKernelMod : public NativeCpuKernelMod, public MatchKernelHelper<NMSWithMaskCpuKernelMod> {
34  public:
35   NMSWithMaskCpuKernelMod() = default;
36   ~NMSWithMaskCpuKernelMod() override = default;
37 
38   bool Init(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &outputs) override;
39   int Resize(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &outputs) override;
Launch(const std::vector<KernelTensor * > & inputs,const std::vector<KernelTensor * > & workspace,const std::vector<KernelTensor * > & outputs)40   bool Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspace,
41               const std::vector<KernelTensor *> &outputs) override {
42     return kernel_func_(this, inputs, workspace, outputs);
43   }
44   const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;
45 
GetOpSupport()46   std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }
47 
48  private:
49   template <typename T>
50   void NmsBitonicSortByKeyKernel(const int inner, const size_t ceil_power2, const T *input, T *data_buff,
51                                  int *index_buff, int box_size);
52 
53   void MaskInit(size_t numSq, bool *row_mask);
54 
55   template <typename T>
56   void PopulateOutput(const T *data_in, T *data_out, const int *index_buff, const int num, int box_size,
57                       bool flip_mode);
58 
59   void Preprocess(const int num, int *sel_idx, bool *sel_boxes);
60 
61   template <typename T>
62   bool IouDecision(const T *output, int box_A_start, int box_B_start, float IOU_value) const;
63 
64   template <typename T>
65   void NmsPass(const int num, const float IOU_value, const T *output, int box_size, bool *row_mask);
66 
67   void ReducePass(const int num, bool *sel_boxes, const bool *row_mask);
68 
69   template <typename T>
70   bool LaunchKernel(const std::vector<kernel::KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspace,
71                     const std::vector<kernel::KernelTensor *> &outputs);
72 
73   int num_input_{0};
74   float iou_value_{0.0};
75   size_t ceil_power_2_{0};
76   static const int box_size_ = 5;  //  pre_defined box width
77 };
78 }  // namespace kernel
79 }  // namespace mindspore
80 
81 #endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_NMS_WITH_MASK_CPU_KERNEL_H_
82