/**
 * Copyright 2020-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_EMBEDDING_LOOK_UP_CPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_EMBEDDING_LOOK_UP_CPU_KERNEL_H_

#include <vector>
#include <memory>
#include <string>
#include <map>
#include <functional>
#include <utility>

#include "plugin/device/cpu/kernel/cpu_kernel.h"
#include "plugin/device/cpu/hal/device/cpu_device_address.h"
#include "plugin/factory/ms_factory.h"
#include "include/common/thread_pool.h"

namespace mindspore {
namespace kernel {
class EmbeddingLookUpCpuKernelMod : public NativeCpuKernelMod, public MatchKernelHelper<EmbeddingLookUpCpuKernelMod> {
 public:
  EmbeddingLookUpCpuKernelMod() = default;
  ~EmbeddingLookUpCpuKernelMod() override = default;

  // One-time initialization from input/output metadata (dtypes and attributes).
  bool Init(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &outputs) override;

  // Re-derives shape-dependent state (e.g. first_dim_size_, outer_dim_size_) when input shapes change.
  int Resize(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &outputs) override;

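  // Dispatches to the type-specialized LaunchKernel that was bound to kernel_func_ when the kernel was set up.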
  bool Launch(const std::vector<KernelTensor *> &inputs, const std::vector<KernelTensor *> &workspace,
              const std::vector<KernelTensor *> &outputs) override {
    return kernel_func_(this, inputs, workspace, outputs);
  }

  const std::vector<std::pair<KernelAttr, KernelRunFunc>> &GetFuncList() const override;

  std::vector<KernelAttr> GetOpSupport() override { return OpSupport(); }

 protected:
  // Typed implementation selected through GetFuncList(); the template parameters
  // cover the params, indices, and offset element types.
  template <typename T, typename S, typename G>
  bool LaunchKernel(const std::vector<kernel::KernelTensor *> &inputs, const std::vector<kernel::KernelTensor *> &,
                    const std::vector<kernel::KernelTensor *> &outputs);

  int64_t offset_{0};
  size_t input_indices_lens_{1};
  size_t first_dim_size_{1};
  size_t outer_dim_size_{1};
  TypeId input_indices_dtype_{kNumberTypeInt32};
  TypeId input_params_dtype_{kTypeUnknown};

  // Whether the embedding storage capability is enabled. Embedding storage provides hot-data caching and
  // persistent storage of non-hot data for embedding tables, and is generally used for very large embedding
  // tables.
  bool enable_embedding_storage_{false};
  // The globally unique parameter key, used to get the embedding storage instance.
  int32_t parameter_key_{-1};
};
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_CPU_EMBEDDING_LOOK_UP_CPU_KERNEL_H_
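For context, here is a minimal, self-contained sketch of the gather that a LaunchKernel<T, S, G> of this shape typically performs. The parameter names mirror the members above (offset_, input_indices_lens_, first_dim_size_, outer_dim_size_), but the function name EmbeddingGatherSketch and the zero-fill behavior for out-of-range indices are assumptions for illustration, not MindSpore's actual implementation:

// Illustrative sketch only -- not the actual MindSpore kernel.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Gathers rows of `params` selected by `indices`, shifted by `offset` so that a
// shard of a partitioned embedding table can resolve global indices to local rows.
template <typename T, typename G>
void EmbeddingGatherSketch(const T *params, const G *indices, T *output,
                           std::size_t indices_lens,    // input_indices_lens_
                           std::size_t first_dim_size,  // rows held by this shard
                           std::size_t outer_dim_size,  // elements per row
                           int64_t offset) {            // offset_ of this shard
  for (std::size_t i = 0; i < indices_lens; ++i) {
    const int64_t row = static_cast<int64_t>(indices[i]) - offset;
    T *dst = output + i * outer_dim_size;
    if (row >= 0 && row < static_cast<int64_t>(first_dim_size)) {
      const T *src = params + row * outer_dim_size;
      std::copy(src, src + outer_dim_size, dst);
    } else {
      // Assumed behavior: indices outside this shard yield zero vectors.
      std::fill(dst, dst + outer_dim_size, T{0});
    }
  }
}

int main() {
  const std::vector<float> params = {1, 1, 2, 2, 3, 3};  // 3 rows x 2 columns
  const std::vector<int32_t> indices = {2, 0, 9};        // 9 is out of range
  std::vector<float> out(indices.size() * 2);
  EmbeddingGatherSketch(params.data(), indices.data(), out.data(), indices.size(), 3, 2, 0);
  for (float v : out) std::printf("%g ", v);  // prints: 3 3 1 1 0 0
  std::printf("\n");
  return 0;
}

The real kernel presumably splits this loop across threads (note the thread_pool.h include above) and takes a separate path through the embedding storage instance identified by parameter_key_ when enable_embedding_storage_ is set.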