/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NN_MAXPOOL_GRAD_WITH_ARGMAX_V2_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NN_MAXPOOL_GRAD_WITH_ARGMAX_V2_GPU_KERNEL_H_

#include <algorithm>
#include <functional>
#include <vector>
#include <string>
#include <map>
#include <utility>
#include "plugin/device/gpu/kernel/gpu_kernel.h"
#include "plugin/device/gpu/kernel/gpu_kernel_factory.h"
#include "plugin/device/gpu/kernel/kernel_constants.h"

namespace mindspore {
namespace kernel {
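// GPU kernel for the backward pass of MaxPoolWithArgmaxV2: the incoming gradient dy is routed back to the
// input positions recorded in the argmax index tensor produced by the forward pass.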
class MaxPoolGradWithArgmaxV2GpuKernelMod : public NativeGpuKernelMod {
 public:
  explicit MaxPoolGradWithArgmaxV2GpuKernelMod(const std::string &kernel_name) : kernel_name_(kernel_name) {}
  ~MaxPoolGradWithArgmaxV2GpuKernelMod() override = default;

  bool Init(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &outputs) override;

  int Resize(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &outputs) override;

  bool Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspace,
              const std::vector<KernelTensor *> &outputs, void *cuda_stream) override {
    // Nothing to compute when any input is empty (zero-sized).
    if (is_null_input_) {
      return true;
    }
    cuda_stream_ = cuda_stream;
    return kernel_func_(this, inputs, outputs);
  }

  std::vector<KernelAttr> GetOpSupport() override;

 private:
  template <typename T, typename S>
  bool LaunchKernel(const std::vector<kernel::KernelTensor *> &inputs,
                    const std::vector<kernel::KernelTensor *> &outputs);
  using MaxPoolArgmaxV2GradFunc =
    std::function<bool(MaxPoolGradWithArgmaxV2GpuKernelMod *, const std::vector<kernel::KernelTensor *> &,
                       const std::vector<kernel::KernelTensor *> &)>;
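  // Table of supported KernelAttr patterns, each paired with the corresponding typed LaunchKernel
  // instantiation; kernel_func_ holds the entry selected for the current input/output dtypes.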
  static std::vector<std::pair<KernelAttr, MaxPoolArgmaxV2GradFunc>> func_list_;
  MaxPoolArgmaxV2GradFunc kernel_func_;
  std::string kernel_name_;
  void *cuda_stream_{nullptr};

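  // Flattened element counts of the forward input x and the incoming gradient dy: H*W, C*H*W, N*C*H*W.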
  int64_t x_hw_{1};
  int64_t x_chw_{1};
  int64_t x_nchw_{1};
  int64_t dy_hw_{1};
  int64_t dy_chw_{1};
  int64_t dy_nchw_{1};
  bool is_null_input_{false};
};
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NN_MAXPOOL_GRAD_WITH_ARGMAX_V2_GPU_KERNEL_H_