/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "backend/kernel_compiler/cpu/map_cache_idx_cpu_kernel.h"
#include <string>
#include <memory>
#include <vector>
#include "runtime/device/cpu/cpu_device_address.h"
#include "utils/cache_embedding_hashmap_struct.h"

namespace mindspore {
namespace kernel {
namespace {
constexpr size_t kMapCacheIdxInputsNum = 5;
constexpr size_t kMapCacheIdxOutputsNum = 4;
}  // namespace

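// After a hashmap entry is freed, shift the entries that follow it in the probe chain back toward
// their home positions so that linear probing can still reach every remaining key.
// Returns the number of entries visited during compression.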
template <typename T>
int Compress(HashmapEntry<T> *entry_p, const size_t &length, T entry) {
  T i = (entry + 1) % static_cast<T>(length);
  T off = 1;
  int compress_count = 0;
  for (; !entry_p[i].IsEmpty(); i = (i + 1) % static_cast<T>(length), off++) {
    if (entry_p[i].tag_ > off) {
      entry_p[entry].key_ = entry_p[i].key_;
      entry_p[entry].value_ = entry_p[i].value_;
      entry_p[entry].step_ = entry_p[i].step_;
      entry_p[entry].tag_ = entry_p[i].tag_ - off;
      entry_p[i].SetEmpty();
      off = 0;
      entry = i;
    }
    compress_count++;
  }
  return compress_count;
}

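// Re-infer the output shapes after launch: output 0 keeps the shape of the input indices,
// while outputs 1-3 (old_emb_idx, miss_emb_idx, swap_cache_idx) are resized to [miss_count].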
void UpdateShape(size_t miss_count, const CNodePtr &node) {
  std::vector<size_t> out_shape;
  (void)out_shape.emplace_back(miss_count);
  size_t output_num = AnfAlgo::GetOutputTensorNum(node);
  std::vector<TypeId> dtypes(output_num);
  for (size_t i = 0; i < output_num; i++) {
    dtypes[i] = AnfAlgo::GetOutputDeviceDataType(node, i);
  }
  AnfAlgo::SetOutputInferTypeAndShape(dtypes, {AnfAlgo::GetOutputInferShape(node, 0), out_shape, out_shape, out_shape},
                                      node.get());
}

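// The hashmap input is expected to be an (n, 4) tensor; each row holds one HashmapEntry:
// key, value (cache index), step of last use, and tag (probe distance used by Compress).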
void MapCacheIdxCPUKernel::InitKernel(const CNodePtr &kernel_node) {
  MS_EXCEPTION_IF_NULL(kernel_node);
  kernel_name_ = AnfAlgo::GetCNodeName(kernel_node);
  node_wpt_ = kernel_node;
  auto hashmap_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0);
  if (hashmap_shape.size() != 2) {
    MS_LOG(EXCEPTION) << "Dimension of HashMap must be 2, i.e. shape (n, 4)";
  }
  hashmap_length_ = hashmap_shape[0];
  if (hashmap_length_ == 0) {
    MS_LOG(EXCEPTION) << "Value of hashmap_length_ must be > 0!";
  }
  dtype_ = AnfAlgo::GetInputDeviceDataType(kernel_node, 0);
}

bool MapCacheIdxCPUKernel::Launch(const std::vector<kernel::AddressPtr> &inputs,
                                  const std::vector<kernel::AddressPtr> &,
                                  const std::vector<kernel::AddressPtr> &outputs) {
  CHECK_KERNEL_INPUTS_NUM(inputs.size(), kMapCacheIdxInputsNum, kernel_name_);
  CHECK_KERNEL_OUTPUTS_NUM(outputs.size(), kMapCacheIdxOutputsNum, kernel_name_);
  if (dtype_ == kNumberTypeInt32) {
    LaunchKernel<int>(inputs, outputs);
  } else if (dtype_ == kNumberTypeInt64) {
    LaunchKernel<int64_t>(inputs, outputs);
  } else {
    MS_LOG(EXCEPTION) << "Only int32 and int64 are supported";
  }
  return true;
}

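// Inputs:  hashmap, indices, step, emb_max_num, cache offset.
// Outputs: cache_idx (per input index), old_emb_idx / miss_emb_idx / swap_cache_idx (per miss).
// The kernel first looks every index up in the hashmap, then inserts the missed keys while
// evicting entries that were not used in the current step, and finally remaps the missed
// positions to the cache slots freed by the eviction.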
template <typename T>
void MapCacheIdxCPUKernel::LaunchKernel(const std::vector<AddressPtr> &inputs,
                                        const std::vector<kernel::AddressPtr> &outputs) {
  auto node = node_wpt_.lock();
  auto emb_idx_shape = AnfAlgo::GetPrevNodeOutputInferShape(node, 1);
  batch_size_ = 1;
  for (size_t i = 0; i < emb_idx_shape.size(); ++i) {
    batch_size_ *= emb_idx_shape[i];
  }
  HashmapEntry<T> *hashmap = reinterpret_cast<HashmapEntry<T> *>(inputs[0]->addr);
  auto input_indices = reinterpret_cast<T *>(inputs[1]->addr);
  T *step_ = reinterpret_cast<T *>(inputs[2]->addr);
  T emb_max_num = *reinterpret_cast<T *>(inputs[3]->addr);
  T offset = *reinterpret_cast<T *>(inputs[4]->addr);
  auto output_cache_idx = reinterpret_cast<T *>(outputs[0]->addr);
  auto output_old_emb_idx = reinterpret_cast<T *>(outputs[1]->addr);
  auto output_miss_emb_idx = reinterpret_cast<T *>(outputs[2]->addr);
  auto output_swap_cache_idx = reinterpret_cast<T *>(outputs[3]->addr);
  std::vector<T> miss_idx;
  size_t miss_count = 0;
  float total_count = 0;
  int count_size = 0;
  float hit_count = 0;
  // Phase 1: search the cache index of every input index via linear probing.
  for (size_t i = 0; i < batch_size_; ++i) {
    T key = input_indices[i] - offset;
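    // Indices outside the local embedding range are not cached; return -1 for them.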
    if (key >= emb_max_num || key < 0) {
      output_cache_idx[i] = -1;
      continue;
    }
    T tmp_entry = HashFunc(key, hashmap_length_);
    size_t count = 1;
    count_size += 1;
    while ((!hashmap[tmp_entry].IsEmpty() && !hashmap[tmp_entry].IsKey(key))) {
      tmp_entry = (tmp_entry + 1) % static_cast<T>(hashmap_length_);
      if (count > hashmap_length_) {
        MS_LOG(EXCEPTION) << "Hashmap is full, search cache idx failed, please set a larger vocab_cache_size!";
      }
      count += 1;
    }
    total_count += SizeToFloat(count);
    if (hashmap[tmp_entry].IsEmpty()) {
      (void)miss_idx.emplace_back(i);
      output_miss_emb_idx[miss_count] = key;
      output_cache_idx[i] = -1;
      miss_count++;
    } else {
      hit_count += 1;
      output_cache_idx[i] = hashmap[tmp_entry].value_;
      hashmap[tmp_entry].step_ = step_[0];
    }
  }
  if (miss_count != 0) {
    MS_LOG(INFO) << "Miss count: " << miss_count;
  }
  if (count_size != 0) {
    MS_LOG(INFO) << "Avg search count: " << total_count / count_size;
    MS_LOG(INFO) << "Cache hit rate: " << hit_count / count_size;
  }
  float total_insert_count = 0;
  float total_delete_count = 0;
  // Phase 2: insert every missed key into the hashmap and evict an entry not used in this step.
  for (size_t i = 0; i < miss_count; ++i) {
    T emb_idx = output_miss_emb_idx[i];
    T entry = HashFunc(emb_idx, hashmap_length_);
    size_t tag_count = 1;
    while (!hashmap[entry].IsEmpty()) {
      entry = (entry + 1) % static_cast<T>(hashmap_length_);
      if (tag_count > hashmap_length_) {
        MS_LOG(EXCEPTION) << "Hashmap is full, insert new key failed, please set a larger vocab_cache_size!";
      }
      tag_count++;
    }
    hashmap[entry].key_ = emb_idx;
    hashmap[entry].step_ = step_[0];
    hashmap[entry].tag_ = static_cast<T>(tag_count);
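    // Probe forward for a victim: skip empty slots and entries already used in this step,
    // then evict the first stale entry found and hand its cache slot to the new key.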
    T tmp_entry = (entry + 1) % static_cast<T>(hashmap_length_);
    size_t delete_count = 1;
    while (hashmap[tmp_entry].IsEmpty() || hashmap[tmp_entry].IsUsing(step_[0])) {
      tmp_entry = (tmp_entry + 1) % static_cast<T>(hashmap_length_);
      if (delete_count > hashmap_length_) {
        MS_LOG(EXCEPTION) << "Hashmap is full, delete old key failed, please set a larger vocab_cache_size!";
      }
      delete_count++;
    }
    output_swap_cache_idx[i] = hashmap[tmp_entry].value_;
    output_old_emb_idx[i] = hashmap[tmp_entry].key_;
    hashmap[entry].value_ = output_swap_cache_idx[i];
    hashmap[tmp_entry].SetEmpty();
    int compress_count = Compress(hashmap, hashmap_length_, tmp_entry);
    total_delete_count += IntToFloat(compress_count + SizeToInt(delete_count));
    total_insert_count += SizeToFloat(tag_count);
  }
  if (miss_count != 0) {
    MS_LOG(INFO) << "Avg insert count: " << total_insert_count / miss_count;
    MS_LOG(INFO) << "Avg delete count: " << total_delete_count / miss_count;
  }
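  // Advance the global step counter for the next launch.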
  step_[0] += 1;
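  // Phase 3: fill in the cache index of every missed position with its newly assigned slot.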
  for (size_t i = 0; i < miss_count; ++i) {
    output_cache_idx[miss_idx[i]] = output_swap_cache_idx[i];
  }
  UpdateShape(miss_count, node);
}
}  // namespace kernel
}  // namespace mindspore