//===-- Resizable Monotonic HashTable ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H
#define LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H

#include "include/llvm-libc-types/ENTRY.h"
#include "src/__support/CPP/bit.h" // bit_ceil
#include "src/__support/CPP/new.h"
#include "src/__support/HashTable/bitmask.h"
#include "src/__support/hash.h"
#include "src/__support/macros/attributes.h"
#include "src/__support/macros/optimization.h"
#include "src/__support/memory_size.h"
#include "src/string/memset.h"
#include "src/string/strcmp.h"
#include "src/string/strlen.h"
#include <stddef.h>
#include <stdint.h>

namespace LIBC_NAMESPACE {
namespace internal {

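// The 64-bit hash is consumed twice: the low bits (reduced by the
// power-of-two entries_mask) choose the starting probe position, while the
// top 7 bits below become the per-slot control byte that is matched in bulk
// against a whole Group of slots (see bitmask.h).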
LIBC_INLINE uint8_t secondary_hash(uint64_t hash) {
  // top 7 bits of the hash.
  return static_cast<uint8_t>(hash >> 57);
}

// Probe sequence based on triangular numbers, which is guaranteed (since our
// table size is a power of two) to visit every group of elements exactly once.
//
// A triangular probe has us jump by 1 more group every time. So first we
// jump by 1 group (meaning we just continue our linear scan), then 2 groups
// (skipping over 1 group), then 3 groups (skipping over 2 groups), and so on.
//
// If we set sizeof(Group) to be one unit:
//   T[k] = 1 + 2 + ... + k = k * (k + 1) / 2
// It is provable that T[k] mod 2^m generates a permutation of
//   0, 1, 2, 3, ..., 2^m - 2, 2^m - 1
// Detailed proof is available at:
// https://fgiesen.wordpress.com/2015/02/22/triangular-numbers-mod-2n/
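//
// As a concrete check, take m = 3 (eight groups): T[0..7] = 0, 1, 3, 6, 10,
// 15, 21, 28, which reduce mod 8 to 0, 1, 3, 6, 2, 7, 5, 4 -- every group is
// visited exactly once before the sequence repeats.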
struct ProbeSequence {
  size_t position;
  size_t stride;
  size_t entries_mask;

  LIBC_INLINE size_t next() {
    position += stride;
    position &= entries_mask;
    stride += sizeof(Group);
    return position;
  }
};
// The number of entries is at least the group width: we do not
// need to do the fixup when we set the control bytes.
// The number of entries is at least 8: we don't have to worry
// about special sizes when checking the fullness of the table.
LIBC_INLINE size_t capacity_to_entries(size_t cap) {
  if (8 >= sizeof(Group) && cap < 8)
    return 8;
  if (16 >= sizeof(Group) && cap < 15)
    return 16;
  if (cap < sizeof(Group))
    cap = sizeof(Group);
  // overflow is always checked in allocate()
  return cpp::bit_ceil(cap * 8 / 7);
}
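
// For example, capacity_to_entries(20) (with a group width of at most 16)
// computes bit_ceil(20 * 8 / 7) = bit_ceil(22) = 32 entries, whose 7/8 load
// limit of 32 / 8 * 7 = 28 comfortably covers the requested 20 slots.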

// The heap memory layout for a HashTable with N buckets is as follows:
//
// ==============================
// |         N * Entry          |
// ============================== <- align boundary
// |           Header           |
// ============================== <- align boundary (for fast resize)
// | (N + sizeof(Group)) * Byte |
// ==============================
//
// The trailing group part is to make sure we can always load
// a whole group of control bytes.

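// The HashTable header itself is the anchor of this layout: entry(i) reads
// ENTRY slots at negative offsets from `this`, while the control bytes live
// at a fixed positive offset past the header, so a single pointer reaches all
// three regions.
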
struct HashTable {
  HashState state;
  size_t entries_mask;    // number of buckets - 1
  size_t available_slots; // less than capacity
private:
  // How many entries are there in the table.
  LIBC_INLINE size_t num_of_entries() const { return entries_mask + 1; }

  // How many entries can we store in the table before resizing.
  LIBC_INLINE size_t full_capacity() const { return num_of_entries() / 8 * 7; }

  // The alignment of the whole memory area is the maximum of the alignment
  // among the following types:
  // - HashTable
  // - ENTRY
  // - Group
  LIBC_INLINE constexpr static size_t table_alignment() {
    size_t left_align = alignof(HashTable) > alignof(ENTRY) ? alignof(HashTable)
                                                            : alignof(ENTRY);
    return left_align > alignof(Group) ? left_align : alignof(Group);
  }

  LIBC_INLINE bool is_full() const { return available_slots == 0; }

  LIBC_INLINE size_t offset_from_entries() const {
    size_t entries_size = num_of_entries() * sizeof(ENTRY);
    return entries_size +
           SafeMemSize::offset_to(entries_size, table_alignment());
  }

  LIBC_INLINE constexpr static size_t offset_to_groups() {
    size_t header_size = sizeof(HashTable);
    return header_size + SafeMemSize::offset_to(header_size, table_alignment());
  }

  LIBC_INLINE ENTRY &entry(size_t i) {
    return reinterpret_cast<ENTRY *>(this)[-i - 1];
  }

  LIBC_INLINE const ENTRY &entry(size_t i) const {
    return reinterpret_cast<const ENTRY *>(this)[-i - 1];
  }

  LIBC_INLINE uint8_t &control(size_t i) {
    uint8_t *ptr = reinterpret_cast<uint8_t *>(this) + offset_to_groups();
    return ptr[i];
  }

  LIBC_INLINE const uint8_t &control(size_t i) const {
    const uint8_t *ptr =
        reinterpret_cast<const uint8_t *>(this) + offset_to_groups();
    return ptr[i];
  }

  // We duplicate a group of control bytes to the end. Thus, it is possible
  // that we need to set two control bytes at the same time.
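  //
  // As a worked example, assume 16 entries (entries_mask == 15) and an 8-byte
  // Group: set_ctrl(3, v) mirrors to ((3 - 8) & 15) + 8 == 19, the duplicate
  // of byte 3 in the trailing group, while set_ctrl(10, v) maps back to 10
  // itself, so the second store is simply a harmless overwrite.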
  LIBC_INLINE void set_ctrl(size_t index, uint8_t value) {
    size_t index2 = ((index - sizeof(Group)) & entries_mask) + sizeof(Group);
    control(index) = value;
    control(index2) = value;
  }

  LIBC_INLINE size_t find(const char *key, uint64_t primary) {
    uint8_t secondary = secondary_hash(primary);
    ProbeSequence sequence{static_cast<size_t>(primary), 0, entries_mask};
    while (true) {
      size_t pos = sequence.next();
      Group ctrls = Group::load(&control(pos));
      IteratableBitMask masks = ctrls.match_byte(secondary);
      for (size_t i : masks) {
        size_t index = (pos + i) & entries_mask;
        ENTRY &entry = this->entry(index);
        if (LIBC_LIKELY(entry.key != nullptr && strcmp(entry.key, key) == 0))
          return index;
      }
      BitMask available = ctrls.mask_available();
      // Since there is no deletion, the first time we find an available slot
      // it is also ready to be used as an insertion point. Therefore, we also
      // return the first available slot we find. If such an entry is empty,
      // the key will be nullptr.
      if (LIBC_LIKELY(available.any_bit_set())) {
        size_t index =
            (pos + available.lowest_set_bit_nonzero()) & entries_mask;
        return index;
      }
    }
  }

  LIBC_INLINE uint64_t oneshot_hash(const char *key) const {
    LIBC_NAMESPACE::internal::HashState hasher = state;
    hasher.update(key, strlen(key));
    return hasher.finish();
  }

  // A fast insertion routine that checks neither whether the key already
  // exists nor whether the table is full. It is only used in grow(), where we
  // insert all existing entries into a new table; hence both requirements are
  // naturally satisfied.
  LIBC_INLINE ENTRY *unsafe_insert(ENTRY item) {
    uint64_t primary = oneshot_hash(item.key);
    uint8_t secondary = secondary_hash(primary);
    ProbeSequence sequence{static_cast<size_t>(primary), 0, entries_mask};
    while (true) {
      size_t pos = sequence.next();
      Group ctrls = Group::load(&control(pos));
      BitMask available = ctrls.mask_available();
      if (available.any_bit_set()) {
        size_t index =
            (pos + available.lowest_set_bit_nonzero()) & entries_mask;
        set_ctrl(index, secondary);
        entry(index).key = item.key;
        entry(index).data = item.data;
        available_slots--;
        return &entry(index);
      }
    }
  }

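  // Growth roughly doubles the table. For instance, a full 16-entry table
  // holds 14 keys, so grow() asks for a capacity hint of 15; with a group
  // width of 8 or 16, capacity_to_entries(15) then lands on 32 entries, whose
  // load limit of 28 leaves room for further insertions.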
  LIBC_INLINE HashTable *grow() const {
    size_t hint = full_capacity() + 1;
    HashState state = this->state;
    // migrate to a new random state
    state.update(&hint, sizeof(hint));
    HashTable *new_table = allocate(hint, state.finish());
    // It is safe to call unsafe_insert() because we know that:
    // - the new table has enough capacity to hold all the entries
    // - there is no duplicate key in the old table
    if (new_table != nullptr)
      for (ENTRY e : *this)
        new_table->unsafe_insert(e);
    return new_table;
  }

  LIBC_INLINE static ENTRY *insert(HashTable *&table, ENTRY item,
                                   uint64_t primary) {
    auto index = table->find(item.key, primary);
    auto slot = &table->entry(index);
    // SVr4 and POSIX.1-2001 specify that the action is significant only for
    // unsuccessful searches, so an ENTER should not do anything for a
    // successful search.
    if (slot->key != nullptr)
      return slot;

    // If the table is full, try to grow it.
    if (table->is_full()) {
      HashTable *new_table = table->grow();
      // allocation failed: return nullptr to indicate failure
      if (new_table == nullptr)
        return nullptr;
      // resized successfully: clean up the old table and use the new one
      deallocate(table);
      table = new_table;
      // it is still valid to use the fastpath insertion.
      return table->unsafe_insert(item);
    }

    table->set_ctrl(index, secondary_hash(primary));
    slot->key = item.key;
    slot->data = item.data;
    table->available_slots--;
    return slot;
  }


public:
  LIBC_INLINE static void deallocate(HashTable *table) {
    if (table) {
      void *ptr =
          reinterpret_cast<uint8_t *>(table) - table->offset_from_entries();
      operator delete(ptr, std::align_val_t{table_alignment()});
    }
  }

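  // A worked size computation, assuming a 64-bit target with sizeof(ENTRY) ==
  // 16, sizeof(Group) == 8, and table_alignment() == 16: allocate(12, r)
  // rounds up to 16 entries, so the entry array occupies 256 bytes, the
  // header starts at that 16-byte boundary, and 16 + 8 control bytes (all
  // memset to 0x80, marking every slot available) trail the header.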
  LIBC_INLINE static HashTable *allocate(size_t capacity, uint64_t randomness) {
    // check if capacity_to_entries overflows MAX_MEM_SIZE
    if (capacity > size_t{1} << (8 * sizeof(size_t) - 1 - 3))
      return nullptr;
    SafeMemSize entries{capacity_to_entries(capacity)};
    SafeMemSize entries_size = entries * SafeMemSize{sizeof(ENTRY)};
    SafeMemSize align_boundary = entries_size.align_up(table_alignment());
    SafeMemSize ctrl_sizes = entries + SafeMemSize{sizeof(Group)};
    SafeMemSize header_size{offset_to_groups()};
    SafeMemSize total_size =
        (align_boundary + header_size + ctrl_sizes).align_up(table_alignment());
    if (!total_size.valid())
      return nullptr;
    AllocChecker ac;

    void *mem = operator new(total_size, std::align_val_t{table_alignment()},
                             ac);

    HashTable *table = reinterpret_cast<HashTable *>(
        static_cast<uint8_t *>(mem) + align_boundary);
    if (ac) {
      table->entries_mask = entries - 1u;
      table->available_slots = entries / 8 * 7;
      table->state = HashState{randomness};
      memset(&table->control(0), 0x80, ctrl_sizes);
      memset(mem, 0, table->offset_from_entries());
    }
    return table;
  }

  struct FullTableIterator {
    size_t current_offset;
    size_t remaining;
    IteratableBitMask current_mask;
    const HashTable &table;

    // It is fine to use remaining to represent the iterator:
    // - this comparison only happens with the same table
    // - hashtable will not be mutated during the iteration
    LIBC_INLINE bool operator==(const FullTableIterator &other) const {
      return remaining == other.remaining;
    }
    LIBC_INLINE bool operator!=(const FullTableIterator &other) const {
      return remaining != other.remaining;
    }

    LIBC_INLINE FullTableIterator &operator++() {
      this->ensure_valid_group();
      current_mask.remove_lowest_bit();
      remaining--;
      return *this;
    }
    LIBC_INLINE const ENTRY &operator*() {
      this->ensure_valid_group();
      return table.entry(
          (current_offset + current_mask.lowest_set_bit_nonzero()) &
          table.entries_mask);
    }

  private:
    LIBC_INLINE void ensure_valid_group() {
      while (!current_mask.any_bit_set()) {
        current_offset += sizeof(Group);
        // It is ensured that the load will only happen at aligned boundaries.
        current_mask =
            Group::load_aligned(&table.control(current_offset)).occupied();
      }
    }
  };

  using value_type = ENTRY;
  using iterator = FullTableIterator;
  iterator begin() const {
    return {0, full_capacity() - available_slots,
            Group::load_aligned(&control(0)).occupied(), *this};
  }
  iterator end() const { return {0, 0, {BitMask{0}}, *this}; }

  LIBC_INLINE ENTRY *find(const char *key) {
    uint64_t primary = oneshot_hash(key);
    ENTRY &entry = this->entry(find(key, primary));
    if (entry.key == nullptr)
      return nullptr;
    return &entry;
  }

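  // A minimal usage sketch of the public interface (the seed value and the
  // key_string/value_ptr names below are purely illustrative):
  //   HashTable *table = HashTable::allocate(16, /*randomness=*/0x1234);
  //   if (table != nullptr) {
  //     HashTable::insert(table, ENTRY{key_string, value_ptr});
  //     ENTRY *found = table->find(key_string); // nullptr if absent
  //     HashTable::deallocate(table);
  //   }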
  LIBC_INLINE static ENTRY *insert(HashTable *&table, ENTRY item) {
    uint64_t primary = table->oneshot_hash(item.key);
    return insert(table, item, primary);
  }
};
} // namespace internal
} // namespace LIBC_NAMESPACE

#endif // LLVM_LIBC_SRC___SUPPORT_HASHTABLE_TABLE_H