// Copyright 2025 The Abseil Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// This file contains the implementation of hashtable control byte
// manipulation.

#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_
#define ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <type_traits>

#include "absl/base/config.h"

#ifdef ABSL_INTERNAL_HAVE_SSE2
#include <emmintrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_SSSE3
#include <tmmintrin.h>
#endif

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef ABSL_INTERNAL_HAVE_ARM_NEON
#include <arm_neon.h>
#endif

#include "absl/base/optimization.h"
#include "absl/numeric/bits.h"
#include "absl/base/internal/endian.h"

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {

#ifdef ABSL_SWISSTABLE_ASSERT
#error ABSL_SWISSTABLE_ASSERT cannot be directly set
#else
// We use this macro for assertions that users may see when the table is in an
// invalid state that sanitizers may help diagnose.
#define ABSL_SWISSTABLE_ASSERT(CONDITION) \
  assert((CONDITION) && "Try enabling sanitizers.")
#endif

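// Returns the number of trailing zero bits in `x`. `x` must be non-zero; the
// `ABSL_ASSUME` below lets the compiler drop the zero check in `countr_zero`.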
template <typename T>
uint32_t TrailingZeros(T x) {
  ABSL_ASSUME(x != 0);
  return static_cast<uint32_t>(countr_zero(x));
}

// An 8-byte bitmask with the most significant bit set in every byte.
constexpr uint64_t kMsbs8Bytes = 0x8080808080808080ULL;
// 8 kEmpty bytes, useful for small table initialization.
constexpr uint64_t k8EmptyBytes = kMsbs8Bytes;

// An abstract bitmask, such as that emitted by a SIMD instruction.
//
// Specifically, this type implements a simple bitset whose representation is
// controlled by `SignificantBits` and `Shift`. `SignificantBits` is the number
// of abstract bits in the bitset, while `Shift` is the log-base-two of the
// width of an abstract bit in the representation.
// This mask supports its operations with any number of real bits set within an
// abstract bit. To support iteration on top of that, the implementation must
// guarantee that, within a set abstract bit, only the most significant real
// bit is set.
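//
// For example (illustrative only): `NonIterableBitMask<uint64_t, 8, 3>`
// interprets each byte of a 64-bit mask as one abstract bit, so for the mask
// 0x0000008000000080 the abstract bits 0 and 4 are set, LowestBitSet() == 0,
// HighestBitSet() == 4, TrailingZeros() == 0 and LeadingZeros() == 3.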
template <class T, int SignificantBits, int Shift = 0>
class NonIterableBitMask {
 public:
  explicit NonIterableBitMask(T mask) : mask_(mask) {}

  explicit operator bool() const { return this->mask_ != 0; }

  // Returns the index of the lowest *abstract* bit set in `self`.
  uint32_t LowestBitSet() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the index of the highest *abstract* bit set in `self`.
  uint32_t HighestBitSet() const {
    return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
  }

  // Returns the number of trailing zero *abstract* bits.
  uint32_t TrailingZeros() const {
    return container_internal::TrailingZeros(mask_) >> Shift;
  }

  // Returns the number of leading zero *abstract* bits.
  uint32_t LeadingZeros() const {
    constexpr int total_significant_bits = SignificantBits << Shift;
    constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
    return static_cast<uint32_t>(
               countl_zero(static_cast<T>(mask_ << extra_bits))) >>
           Shift;
  }

  T mask_;
};

// A bitmask that supports iteration over the indices of its set abstract
// bits.
//
// For example, when `SignificantBits` is 16 and `Shift` is zero, this is just
// an ordinary 16-bit bitset occupying the low 16 bits of `mask`. When
// `SignificantBits` is 8 and `Shift` is 3, abstract bits are represented as
// the bytes `0x00` and `0x80`, and it occupies all 64 bits of the bitmask.
// If `NullifyBitsOnIteration` is true (only allowed when `Shift` is 3), a
// non-zero abstract bit may have additional bits set below the most
// significant one (e.g., `0xff`, `0x83` and `0x9c` are ok, but `0x6f` is not).
//
// For example:
//   for (int i : BitMask<uint32_t, 16>(0b101)) -> yields 0, 2
//   for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
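//
// Illustrative example (not taken from the tests): with
// `NullifyBitsOnIteration` set, the extra low bits are cleared before each
// increment, so
//   for (int i : BitMask<uint64_t, 8, 3, true>(0x0000000083ff0000))
// also yields 2, 3.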
template <class T, int SignificantBits, int Shift = 0,
          bool NullifyBitsOnIteration = false>
class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
  using Base = NonIterableBitMask<T, SignificantBits, Shift>;
  static_assert(std::is_unsigned<T>::value, "");
  static_assert(Shift == 0 || Shift == 3, "");
  static_assert(!NullifyBitsOnIteration || Shift == 3, "");

 public:
  explicit BitMask(T mask) : Base(mask) {
    if (Shift == 3 && !NullifyBitsOnIteration) {
      ABSL_SWISSTABLE_ASSERT(this->mask_ == (this->mask_ & kMsbs8Bytes));
    }
  }
  // BitMask is an iterator over the indices of its abstract bits.
  using value_type = int;
  using iterator = BitMask;
  using const_iterator = BitMask;

  BitMask& operator++() {
    if (Shift == 3 && NullifyBitsOnIteration) {
      this->mask_ &= kMsbs8Bytes;
    }
    this->mask_ &= (this->mask_ - 1);
    return *this;
  }

  uint32_t operator*() const { return Base::LowestBitSet(); }

  BitMask begin() const { return *this; }
  BitMask end() const { return BitMask(0); }

 private:
  friend bool operator==(const BitMask& a, const BitMask& b) {
    return a.mask_ == b.mask_;
  }
  friend bool operator!=(const BitMask& a, const BitMask& b) {
    return a.mask_ != b.mask_;
  }
};

using h2_t = uint8_t;

// The values here are selected for maximum performance. See the static asserts
// below for details.

// A `ctrl_t` is a single control byte, which can have one of four
// states: empty, deleted, full (which has an associated seven-bit h2_t value)
// and the sentinel. They have the following bit patterns:
//
//      empty: 1 0 0 0 0 0 0 0
//    deleted: 1 1 1 1 1 1 1 0
//       full: 0 h h h h h h h  // h represents the hash bits.
//   sentinel: 1 1 1 1 1 1 1 1
//
// These values are specifically tuned for SSE-flavored SIMD.
// The static_asserts below detail the source of these choices.
//
// We use an enum class so that when strict aliasing is enabled, the compiler
// knows ctrl_t doesn't alias other types.
enum class ctrl_t : int8_t {
  kEmpty = -128,   // 0b10000000
  kDeleted = -2,   // 0b11111110
  kSentinel = -1,  // 0b11111111
};
static_assert(
    (static_cast<int8_t>(ctrl_t::kEmpty) &
     static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
    "Special markers need to have the MSB to make checking for them efficient");
static_assert(
    ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
    "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
static_assert(
    ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
    "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
    "registers (pcmpeqd xmm, xmm)");
static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
              "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
              "existence efficient (psignb xmm, xmm)");
static_assert(
    (~static_cast<int8_t>(ctrl_t::kEmpty) &
     ~static_cast<int8_t>(ctrl_t::kDeleted) &
     static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
    "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
    "shared by ctrl_t::kSentinel to make the scalar test for "
    "MaskEmptyOrDeleted() efficient");
static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
              "ctrl_t::kDeleted must be -2 to make the implementation of "
              "ConvertSpecialToEmptyAndFullToDeleted efficient");

// Helpers for checking the state of a control byte.
inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
inline bool IsFull(ctrl_t c) {
  // Cast `c` to the underlying type instead of casting `0` to `ctrl_t` as `0`
  // is not a value in the enum. Both ways are equivalent, but this way makes
  // linters happier.
  return static_cast<std::underlying_type_t<ctrl_t>>(c) >= 0;
}
inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
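// For example (illustrative): IsFull(static_cast<ctrl_t>(0x5a)) is true since
// full bytes are exactly those with the MSB clear, and IsEmptyOrDeleted()
// relies on kEmpty (-128) and kDeleted (-2) being the only values below
// kSentinel (-1).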

#ifdef ABSL_INTERNAL_HAVE_SSE2
// Quick reference guide for intrinsics used below:
//
// * __m128i: An XMM (128-bit) word.
//
// * _mm_setzero_si128: Returns a zero vector.
// * _mm_set1_epi8:     Returns a vector with the same i8 in each lane.
//
// * _mm_subs_epi8:    Saturating-subtracts two i8 vectors.
// * _mm_and_si128:    Ands two i128s together.
// * _mm_or_si128:     Ors two i128s together.
// * _mm_andnot_si128: And-nots two i128s together.
//
// * _mm_cmpeq_epi8: Component-wise compares two i8 vectors for equality,
//                   filling each lane with 0x00 or 0xff.
// * _mm_cmpgt_epi8: Same as above, but using > rather than ==.
//
// * _mm_loadu_si128:  Performs an unaligned load of an i128.
// * _mm_storeu_si128: Performs an unaligned store of an i128.
//
// * _mm_sign_epi8:     Retains, negates, or zeroes each i8 lane of the first
//                      argument if the corresponding lane of the second
//                      argument is positive, negative, or zero, respectively.
// * _mm_movemask_epi8: Selects the sign bit out of each i8 lane and produces a
//                      bitmask consisting of those bits.
// * _mm_shuffle_epi8:  Selects i8s from the first argument, using the low
//                      four bits of each i8 lane in the second argument as
//                      indices.

// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char.
// Work around this by emulating the signed comparison when char is unsigned
// under GCC.
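// The emulation below relies on the sign bit of the saturating difference
// `b - a` being set exactly when `a > b` in signed arithmetic.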
inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
#if defined(__GNUC__) && !defined(__clang__)
  if (std::is_unsigned<char>::value) {
    const __m128i mask = _mm_set1_epi8(0x80);
    const __m128i diff = _mm_subs_epi8(b, a);
    return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
  }
#endif
  return _mm_cmpgt_epi8(a, b);
}

struct GroupSse2Impl {
  static constexpr size_t kWidth = 16;  // the number of slots per group
  using BitMaskType = BitMask<uint16_t, kWidth>;
  using NonIterableBitMaskType = NonIterableBitMask<uint16_t, kWidth>;

  explicit GroupSse2Impl(const ctrl_t* pos) {
    ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
  }

  // Returns a bitmask representing the positions of slots that match hash.
  BitMaskType Match(h2_t hash) const {
    auto match = _mm_set1_epi8(static_cast<char>(hash));
    return BitMaskType(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
  }

  // Returns a bitmask representing the positions of empty slots.
  NonIterableBitMaskType MaskEmpty() const {
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    // This only works because ctrl_t::kEmpty is -128.
    return NonIterableBitMaskType(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
    auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
    return NonIterableBitMaskType(
        static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables, the group may contain the "same" slot
  // twice: original and mirrored.
  BitMaskType MaskFull() const {
    return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
  }

  // Returns a bitmask representing the positions of non-full slots.
  // Note: this includes kEmpty, kDeleted, and kSentinel.
  // It is useful in contexts where kSentinel is not present.
  auto MaskNonFull() const {
    return BitMaskType(static_cast<uint16_t>(_mm_movemask_epi8(ctrl)));
  }

  // Returns a bitmask representing the positions of empty or deleted slots.
  NonIterableBitMaskType MaskEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return NonIterableBitMaskType(static_cast<uint16_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
  }

  // Returns the number of leading empty or deleted elements in the group,
  // i.e. the length of the run of empty/deleted slots starting at the first
  // slot of the group.
  uint32_t CountLeadingEmptyOrDeleted() const {
    auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
    return TrailingZeros(static_cast<uint32_t>(
        _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
  }

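  // Maps kEmpty/kDeleted/kSentinel to kEmpty and full bytes to kDeleted.
  // In the SSSE3 path, `_mm_shuffle_epi8` zeroes lanes whose control byte has
  // the MSB set (the special values), so ORing in the MSBs yields 0x80
  // (kEmpty) for special bytes and 126 | 0x80 == 0xfe (kDeleted) for full
  // bytes.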
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    auto msbs = _mm_set1_epi8(static_cast<char>(-128));
    auto x126 = _mm_set1_epi8(126);
#ifdef ABSL_INTERNAL_HAVE_SSSE3
    auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
    auto zero = _mm_setzero_si128();
    auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
    auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
#endif
    _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
  }

  __m128i ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_SSE2

#if defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
struct GroupAArch64Impl {
  static constexpr size_t kWidth = 8;
  using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3,
                              /*NullifyBitsOnIteration=*/true>;
  using NonIterableBitMaskType =
      NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>;

  explicit GroupAArch64Impl(const ctrl_t* pos) {
    ctrl = vld1_u8(reinterpret_cast<const uint8_t*>(pos));
  }

  auto Match(h2_t hash) const {
    uint8x8_t dup = vdup_n_u8(hash);
    auto mask = vceq_u8(ctrl, dup);
    return BitMaskType(vget_lane_u64(vreinterpret_u64_u8(mask), 0));
  }

  auto MaskEmpty() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vceq_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kEmpty)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMaskType(mask);
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables, the group may contain the "same" slot
  // twice: original and mirrored.
  auto MaskFull() const {
    uint64_t mask = vget_lane_u64(
        vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
                                    vdup_n_s8(static_cast<int8_t>(0)))),
        0);
    return BitMaskType(mask);
  }

  // Returns a bitmask representing the positions of non-full slots.
  // Note: this includes kEmpty, kDeleted, and kSentinel.
  // It is useful in contexts where kSentinel is not present.
  auto MaskNonFull() const {
    uint64_t mask = vget_lane_u64(
        vreinterpret_u64_u8(vclt_s8(vreinterpret_s8_u8(ctrl),
                                    vdup_n_s8(static_cast<int8_t>(0)))),
        0);
    return BitMaskType(mask);
  }

  auto MaskEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    return NonIterableBitMaskType(mask);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    uint64_t mask =
        vget_lane_u64(vreinterpret_u64_u8(vcle_s8(
                          vdup_n_s8(static_cast<int8_t>(ctrl_t::kSentinel)),
                          vreinterpret_s8_u8(ctrl))),
                      0);
    // Similar to MaskEmptyOrDeleted(), but the comparison is inverted so that
    // empty or deleted bytes become zero lanes; counting the trailing zero
    // bits then yields the length of the leading empty-or-deleted run.
    // Clang and GCC optimize countr_zero to rbit+clz without any check for 0,
    // so we should be fine.
    return static_cast<uint32_t>(countr_zero(mask)) >> 3;
  }

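  // Maps special bytes to kEmpty and full bytes to kDeleted. Per byte, `x` is
  // 0x02 when the MSB of the control byte is set (special) and 0x00 otherwise,
  // so `x + 0x7e` is 0x80 for special bytes and 0x7e for full bytes; ORing in
  // the MSBs yields kEmpty (0x80) and kDeleted (0xfe) respectively.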
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    uint64_t mask = vget_lane_u64(vreinterpret_u64_u8(ctrl), 0);
    constexpr uint64_t slsbs = 0x0202020202020202ULL;
    constexpr uint64_t midbs = 0x7e7e7e7e7e7e7e7eULL;
    auto x = slsbs & (mask >> 6);
    auto res = (x + midbs) | kMsbs8Bytes;
    little_endian::Store64(dst, res);
  }

  uint8x8_t ctrl;
};
#endif  // ABSL_INTERNAL_HAVE_ARM_NEON && ABSL_IS_LITTLE_ENDIAN

struct GroupPortableImpl {
  static constexpr size_t kWidth = 8;
  using BitMaskType = BitMask<uint64_t, kWidth, /*Shift=*/3,
                              /*NullifyBitsOnIteration=*/false>;
  using NonIterableBitMaskType =
      NonIterableBitMask<uint64_t, kWidth, /*Shift=*/3>;

  explicit GroupPortableImpl(const ctrl_t* pos)
      : ctrl(little_endian::Load64(pos)) {}

  BitMaskType Match(h2_t hash) const {
    // For the technique, see:
    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
    // (Determine if a word has a byte equal to n).
    //
    // Caveat: there are false positives but:
    // - they only occur if there is a real match
    // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
    // - they will be handled gracefully by subsequent checks in code
    //
    // Example:
    //   v = 0x1716151413121110
    //   hash = 0x12
    //   retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl ^ (lsbs * hash);
    return BitMaskType((x - lsbs) & ~x & kMsbs8Bytes);
  }

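  // Returns a bitmask representing the positions of empty slots. A byte is
  // kEmpty exactly when its MSB is set and bit 1 is clear (kDeleted and
  // kSentinel both have bit 1 set); `ctrl << 6` moves bit 1 of each byte into
  // its MSB position, so the and-not keeps only the kEmpty bytes.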
  auto MaskEmpty() const {
    return NonIterableBitMaskType((ctrl & ~(ctrl << 6)) & kMsbs8Bytes);
  }

  // Returns a bitmask representing the positions of full slots.
  // Note: for `is_small()` tables, the group may contain the "same" slot
  // twice: original and mirrored.
  auto MaskFull() const {
    return BitMaskType((ctrl ^ kMsbs8Bytes) & kMsbs8Bytes);
  }

  // Returns a bitmask representing the positions of non-full slots.
  // Note: this includes kEmpty, kDeleted, and kSentinel.
  // It is useful in contexts where kSentinel is not present.
  auto MaskNonFull() const { return BitMaskType(ctrl & kMsbs8Bytes); }

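  // Returns a bitmask representing the positions of empty or deleted slots.
  // These are the bytes with the MSB set and bit 0 clear; kSentinel is the
  // only special value with bit 0 set, and `ctrl << 7` moves bit 0 of each
  // byte into its MSB position.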
  auto MaskEmptyOrDeleted() const {
    return NonIterableBitMaskType((ctrl & ~(ctrl << 7)) & kMsbs8Bytes);
  }

  uint32_t CountLeadingEmptyOrDeleted() const {
    // ctrl | ~(ctrl >> 7) has the lowest bit of each byte set to zero only for
    // kEmpty and kDeleted. We clear all other bits and count the number of
    // trailing zero bytes.
    constexpr uint64_t bits = 0x0101010101010101ULL;
    return static_cast<uint32_t>(countr_zero((ctrl | ~(ctrl >> 7)) & bits) >>
                                 3);
  }

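  // Maps special bytes to kEmpty and full bytes to kDeleted. Per byte, `x` is
  // 0x80 for special bytes and 0x00 for full bytes, so `~x + (x >> 7)` is
  // 0x7f + 0x01 == 0x80 for special bytes and 0xff for full bytes; clearing
  // the low bit then leaves kEmpty (0x80) and kDeleted (0xfe) respectively.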
  void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
    constexpr uint64_t lsbs = 0x0101010101010101ULL;
    auto x = ctrl & kMsbs8Bytes;
    auto res = (~x + (x >> 7)) & ~lsbs;
    little_endian::Store64(dst, res);
  }

  uint64_t ctrl;
};

#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
using GroupFullEmptyOrDeleted = GroupSse2Impl;
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
using Group = GroupAArch64Impl;
// For AArch64, we use the portable implementation for counting and masking
// full, empty, or deleted group elements. This avoids the latency of moving
// between general-purpose and NEON registers when it does not provide a
// benefit: using NEON is profitable for Match(), but not for the
// *EmptyOrDeleted and MaskFull operations. It is difficult to make a similar
// approach beneficial on other architectures such as x86, since they have much
// lower GPR <-> vector register transfer latency and 16-wide groups.
using GroupFullEmptyOrDeleted = GroupPortableImpl;
#else
using Group = GroupPortableImpl;
using GroupFullEmptyOrDeleted = GroupPortableImpl;
#endif
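
// Illustrative sketch of how a `Group` is typically consumed (the real probe
// loop lives in raw_hash_set.h; the names below are hypothetical):
//
//   Group g(ctrl + probe_offset);
//   for (uint32_t i : g.Match(h2)) {
//     if (keys_equal(slots[probe_offset + i], key)) return found;
//   }
//   if (g.MaskEmpty()) return not_found;  // an empty slot ends the probe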

}  // namespace container_internal
ABSL_NAMESPACE_END
}  // namespace absl

#undef ABSL_SWISSTABLE_ASSERT

#endif  // ABSL_CONTAINER_INTERNAL_HASHTABLE_CONTROL_BYTES_H_