1 // Copyright 2021 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
6 #define V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
7
8 #include <algorithm>
9
10 #include "src/base/macros.h"
11 #include "src/base/optional.h"
12 #include "src/execution/isolate-utils-inl.h"
13 #include "src/heap/heap.h"
14 #include "src/objects/fixed-array-inl.h"
15 #include "src/objects/instance-type-inl.h"
16 #include "src/objects/js-collection-iterator.h"
17 #include "src/objects/objects-inl.h"
18 #include "src/objects/smi.h"
19 #include "src/objects/swiss-name-dictionary.h"
20
21 // Has to be the last include (doesn't have include guards):
22 #include "src/objects/object-macros.h"
23
24 namespace v8 {
25 namespace internal {
26
27 #include "torque-generated/src/objects/swiss-name-dictionary-tq-inl.inc"
28
// Boilerplate: cast helper and constructors for SwissNameDictionary,
// expanded from the standard object macros.
CAST_ACCESSOR(SwissNameDictionary)
OBJECT_CONSTRUCTORS_IMPL(SwissNameDictionary, HeapObject)
31
32 swiss_table::ctrl_t* SwissNameDictionary::CtrlTable() {
33 return reinterpret_cast<ctrl_t*>(
34 field_address(CtrlTableStartOffset(Capacity())));
35 }
36
PropertyDetailsTable()37 uint8_t* SwissNameDictionary::PropertyDetailsTable() {
38 return reinterpret_cast<uint8_t*>(
39 field_address(PropertyDetailsTableStartOffset(Capacity())));
40 }
41
Capacity()42 int SwissNameDictionary::Capacity() {
43 return ReadField<int32_t>(CapacityOffset());
44 }
45
SetCapacity(int capacity)46 void SwissNameDictionary::SetCapacity(int capacity) {
47 DCHECK(IsValidCapacity(capacity));
48
49 WriteField<int32_t>(CapacityOffset(), capacity);
50 }
51
NumberOfElements()52 int SwissNameDictionary::NumberOfElements() {
53 return GetMetaTableField(kMetaTableElementCountFieldIndex);
54 }
55
NumberOfDeletedElements()56 int SwissNameDictionary::NumberOfDeletedElements() {
57 return GetMetaTableField(kMetaTableDeletedElementCountFieldIndex);
58 }
59
SetNumberOfElements(int elements)60 void SwissNameDictionary::SetNumberOfElements(int elements) {
61 SetMetaTableField(kMetaTableElementCountFieldIndex, elements);
62 }
63
SetNumberOfDeletedElements(int deleted_elements)64 void SwissNameDictionary::SetNumberOfDeletedElements(int deleted_elements) {
65 SetMetaTableField(kMetaTableDeletedElementCountFieldIndex, deleted_elements);
66 }
67
UsedCapacity()68 int SwissNameDictionary::UsedCapacity() {
69 return NumberOfElements() + NumberOfDeletedElements();
70 }
71
72 // static
IsValidCapacity(int capacity)73 constexpr bool SwissNameDictionary::IsValidCapacity(int capacity) {
74 return capacity == 0 || (capacity >= kInitialCapacity &&
75 // Must be power of 2.
76 ((capacity & (capacity - 1)) == 0));
77 }
78
79 // static
DataTableSize(int capacity)80 constexpr int SwissNameDictionary::DataTableSize(int capacity) {
81 return capacity * kTaggedSize * kDataTableEntryCount;
82 }
83
84 // static
CtrlTableSize(int capacity)85 constexpr int SwissNameDictionary::CtrlTableSize(int capacity) {
86 // Doing + |kGroupWidth| due to the copy of first group at the end of control
87 // table.
88 return (capacity + kGroupWidth) * kOneByteSize;
89 }
90
91 // static
SizeFor(int capacity)92 constexpr int SwissNameDictionary::SizeFor(int capacity) {
93 DCHECK(IsValidCapacity(capacity));
94 return PropertyDetailsTableStartOffset(capacity) + capacity;
95 }
96
// We use 7/8th as maximum load factor for non-special cases.
// For 16-wide groups, that gives an average of two empty slots per group.
// Similar to Abseil's CapacityToGrowth.
// static
constexpr int SwissNameDictionary::MaxUsableCapacity(int capacity) {
  DCHECK(IsValidCapacity(capacity));

  if (Group::kWidth == 8 && capacity == 4) {
    // With 8-wide groups, capacity 4 must not be fully occupied, so cap the
    // usable capacity at 3. (If the group size is 16 we can fully utilize
    // capacity 4: there will be enough kEmpty entries in the ctrl table, and
    // the general formula below yields 4 - 4/8 == 4.)
    return 3;
  }
  return capacity - capacity / 8;
}
111
112 // Returns |at_least_space_for| * 8/7 for non-special cases. Similar to Abseil's
113 // GrowthToLowerboundCapacity.
114 // static
CapacityFor(int at_least_space_for)115 int SwissNameDictionary::CapacityFor(int at_least_space_for) {
116 if (at_least_space_for <= 4) {
117 if (at_least_space_for == 0) {
118 return 0;
119 } else if (at_least_space_for < 4) {
120 return 4;
121 } else if (kGroupWidth == 16) {
122 DCHECK_EQ(4, at_least_space_for);
123 return 4;
124 } else if (kGroupWidth == 8) {
125 DCHECK_EQ(4, at_least_space_for);
126 return 8;
127 }
128 }
129
130 int non_normalized = at_least_space_for + at_least_space_for / 7;
131 return base::bits::RoundUpToPowerOfTwo32(non_normalized);
132 }
133
EntryForEnumerationIndex(int enumeration_index)134 int SwissNameDictionary::EntryForEnumerationIndex(int enumeration_index) {
135 DCHECK_LT(enumeration_index, UsedCapacity());
136 return GetMetaTableField(kMetaTableEnumerationDataStartIndex +
137 enumeration_index);
138 }
139
SetEntryForEnumerationIndex(int enumeration_index,int entry)140 void SwissNameDictionary::SetEntryForEnumerationIndex(int enumeration_index,
141 int entry) {
142 DCHECK_LT(enumeration_index, UsedCapacity());
143 DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
144 DCHECK(IsFull(GetCtrl(entry)));
145
146 SetMetaTableField(kMetaTableEnumerationDataStartIndex + enumeration_index,
147 entry);
148 }
149
// Looks up |key| (which must be a unique Name) and returns the bucket holding
// it as an InternalIndex, or InternalIndex::NotFound() if absent. |isolate|
// is not used by this implementation; the parameter exists for signature
// parity with the other dictionary classes.
template <typename IsolateT>
InternalIndex SwissNameDictionary::FindEntry(IsolateT* isolate, Object key) {
  Name name = Name::cast(key);
  DCHECK(name.IsUniqueName());
  uint32_t hash = name.hash();

  // We probe the hash table in groups of |kGroupWidth| buckets. One bucket
  // corresponds to a 1-byte entry in the control table.
  // Each group can be uniquely identified by the index of its first bucket,
  // which must be a value between 0 (inclusive) and Capacity() (exclusive).
  // Note that logically, groups wrap around after index Capacity() - 1. This
  // means that probing the group starting at, for example, index Capacity() - 1
  // means probing CtrlTable()[Capacity() - 1] followed by CtrlTable()[0] to
  // CtrlTable()[6], assuming a group width of 8. However, in memory, this is
  // achieved by maintaining an additional |kGroupWidth| bytes after the first
  // Capacity() entries of the control table. These contain a copy of the first
  // max(Capacity(), kGroupWidth) entries of the control table. If Capacity() <
  // |kGroupWidth|, then the remaining |kGroupWidth| - Capacity() control bytes
  // are left as |kEmpty|.
  // This means that actually, probing the group starting
  // at index Capacity() - 1 is achieved by probing CtrlTable()[Capacity() - 1],
  // followed by CtrlTable()[Capacity()] to CtrlTable()[Capacity() + 7].

  ctrl_t* ctrl = CtrlTable();
  auto seq = probe(hash, Capacity());
  // At this point, seq.offset() denotes the index of the first bucket in the
  // first group to probe. Note that this doesn't have to be divisible by
  // |kGroupWidth|, but can have any value between 0 (inclusive) and Capacity()
  // (exclusive).
  while (true) {
    Group g{ctrl + seq.offset()};
    for (int i : g.Match(swiss_table::H2(hash))) {
      int candidate_entry = seq.offset(i);
      Object candidate_key = KeyAt(candidate_entry);
      // This key matching is SwissNameDictionary specific!
      if (candidate_key == key) return InternalIndex(candidate_entry);
    }
    if (g.MatchEmpty()) return InternalIndex::NotFound();

    // The following selects the next group to probe. Note that seq.offset()
    // always advances by a multiple of |kGroupWidth|, modulo Capacity(). This
    // is done in a way such that we visit Capacity() / |kGroupWidth|
    // non-overlapping (!) groups before we would visit the same group (or
    // bucket) again.
    seq.next();

    // If the following DCHECK weren't true, we would have probed all Capacity()
    // different buckets without finding one containing |kEmpty| (which would
    // have triggered the g.MatchEmpty() check above). This must not be the
    // case because the maximum load factor of 7/8 guarantees that there must
    // always remain empty buckets.
    //
    // The only exception from this rule are small tables, where 2 * Capacity()
    // < |kGroupWidth|, in which case all Capacity() entries can be filled
    // without leaving empty buckets. The layout of the control
    // table guarantees that after the first Capacity() entries of the control
    // table, the control table contains a copy of those first Capacity()
    // entries, followed by kGroupWidth - 2 * Capacity() entries containing
    // |kEmpty|. This guarantees that the g.MatchEmpty() check above will
    // always trigger if the element wasn't found, correctly preventing us from
    // probing more than one group in this special case.
    DCHECK_LT(seq.index(), Capacity());
  }
}
214
215 template <typename IsolateT>
FindEntry(IsolateT * isolate,Handle<Object> key)216 InternalIndex SwissNameDictionary::FindEntry(IsolateT* isolate,
217 Handle<Object> key) {
218 return FindEntry(isolate, *key);
219 }
220
LoadFromDataTable(int entry,int data_offset)221 Object SwissNameDictionary::LoadFromDataTable(int entry, int data_offset) {
222 return LoadFromDataTable(GetPtrComprCageBase(*this), entry, data_offset);
223 }
224
LoadFromDataTable(PtrComprCageBase cage_base,int entry,int data_offset)225 Object SwissNameDictionary::LoadFromDataTable(PtrComprCageBase cage_base,
226 int entry, int data_offset) {
227 DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
228 int offset = DataTableStartOffset() +
229 (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
230 return TaggedField<Object>::Relaxed_Load(cage_base, *this, offset);
231 }
232
StoreToDataTable(int entry,int data_offset,Object data)233 void SwissNameDictionary::StoreToDataTable(int entry, int data_offset,
234 Object data) {
235 DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
236
237 int offset = DataTableStartOffset() +
238 (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
239
240 RELAXED_WRITE_FIELD(*this, offset, data);
241 WRITE_BARRIER(*this, offset, data);
242 }
243
StoreToDataTableNoBarrier(int entry,int data_offset,Object data)244 void SwissNameDictionary::StoreToDataTableNoBarrier(int entry, int data_offset,
245 Object data) {
246 DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
247
248 int offset = DataTableStartOffset() +
249 (entry * kDataTableEntryCount + data_offset) * kTaggedSize;
250
251 RELAXED_WRITE_FIELD(*this, offset, data);
252 }
253
ClearDataTableEntry(Isolate * isolate,int entry)254 void SwissNameDictionary::ClearDataTableEntry(Isolate* isolate, int entry) {
255 ReadOnlyRoots roots(isolate);
256
257 StoreToDataTable(entry, kDataTableKeyEntryIndex, roots.the_hole_value());
258 StoreToDataTable(entry, kDataTableValueEntryIndex, roots.the_hole_value());
259 }
260
ValueAtPut(int entry,Object value)261 void SwissNameDictionary::ValueAtPut(int entry, Object value) {
262 DCHECK(!value.IsTheHole());
263 StoreToDataTable(entry, kDataTableValueEntryIndex, value);
264 }
265
ValueAtPut(InternalIndex entry,Object value)266 void SwissNameDictionary::ValueAtPut(InternalIndex entry, Object value) {
267 ValueAtPut(entry.as_int(), value);
268 }
269
SetKey(int entry,Object key)270 void SwissNameDictionary::SetKey(int entry, Object key) {
271 DCHECK(!key.IsTheHole());
272 StoreToDataTable(entry, kDataTableKeyEntryIndex, key);
273 }
274
DetailsAtPut(int entry,PropertyDetails details)275 void SwissNameDictionary::DetailsAtPut(int entry, PropertyDetails details) {
276 DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
277 uint8_t encoded_details = details.ToByte();
278 PropertyDetailsTable()[entry] = encoded_details;
279 }
280
DetailsAtPut(InternalIndex entry,PropertyDetails details)281 void SwissNameDictionary::DetailsAtPut(InternalIndex entry,
282 PropertyDetails details) {
283 DetailsAtPut(entry.as_int(), details);
284 }
285
KeyAt(int entry)286 Object SwissNameDictionary::KeyAt(int entry) {
287 return LoadFromDataTable(entry, kDataTableKeyEntryIndex);
288 }
289
KeyAt(InternalIndex entry)290 Object SwissNameDictionary::KeyAt(InternalIndex entry) {
291 return KeyAt(entry.as_int());
292 }
293
NameAt(InternalIndex entry)294 Name SwissNameDictionary::NameAt(InternalIndex entry) {
295 return Name::cast(KeyAt(entry));
296 }
297
298 // This version can be called on empty buckets.
ValueAtRaw(int entry)299 Object SwissNameDictionary::ValueAtRaw(int entry) {
300 return LoadFromDataTable(entry, kDataTableValueEntryIndex);
301 }
302
ValueAt(InternalIndex entry)303 Object SwissNameDictionary::ValueAt(InternalIndex entry) {
304 DCHECK(IsFull(GetCtrl(entry.as_int())));
305 return ValueAtRaw(entry.as_int());
306 }
307
// Variant of ValueAt that returns an empty Optional instead of asserting when
// |entry| is out of bounds. The debug block below verifies that the table is
// a fully initialized object before relying on a plain, non-atomic read of
// Capacity().
base::Optional<Object> SwissNameDictionary::TryValueAt(InternalIndex entry) {
#if DEBUG
  Isolate* isolate;
  GetIsolateFromHeapObject(*this, &isolate);
  DCHECK_NE(isolate, nullptr);
  SLOW_DCHECK(!isolate->heap()->IsPendingAllocation(*this));
#endif  // DEBUG
  // We can read Capacity() in a non-atomic way since we are reading an
  // initialized object which is not pending allocation.
  if (static_cast<unsigned>(entry.as_int()) >=
      static_cast<unsigned>(Capacity())) {
    return {};
  }
  return ValueAtRaw(entry.as_int());
}
323
DetailsAt(int entry)324 PropertyDetails SwissNameDictionary::DetailsAt(int entry) {
325 // GetCtrl(entry) does a bounds check for |entry| value.
326 DCHECK(IsFull(GetCtrl(entry)));
327
328 uint8_t encoded_details = PropertyDetailsTable()[entry];
329 return PropertyDetails::FromByte(encoded_details);
330 }
331
DetailsAt(InternalIndex entry)332 PropertyDetails SwissNameDictionary::DetailsAt(InternalIndex entry) {
333 return DetailsAt(entry.as_int());
334 }
335
336 // static
337 template <typename IsolateT>
EnsureGrowable(IsolateT * isolate,Handle<SwissNameDictionary> table)338 Handle<SwissNameDictionary> SwissNameDictionary::EnsureGrowable(
339 IsolateT* isolate, Handle<SwissNameDictionary> table) {
340 int capacity = table->Capacity();
341
342 if (table->UsedCapacity() < MaxUsableCapacity(capacity)) {
343 // We have room for at least one more entry, nothing to do.
344 return table;
345 }
346
347 int new_capacity = capacity == 0 ? kInitialCapacity : capacity * 2;
348 return Rehash(isolate, table, new_capacity);
349 }
350
GetCtrl(int entry)351 swiss_table::ctrl_t SwissNameDictionary::GetCtrl(int entry) {
352 DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(Capacity()));
353
354 return CtrlTable()[entry];
355 }
356
// Stores ctrl byte |h| for bucket |entry|, mirroring the write into the copy
// of the first group kept at the end of the ctrl table when necessary.
void SwissNameDictionary::SetCtrl(int entry, ctrl_t h) {
  int capacity = Capacity();
  DCHECK_LT(static_cast<unsigned>(entry), static_cast<unsigned>(capacity));

  ctrl_t* ctrl = CtrlTable();
  ctrl[entry] = h;

  // The ctrl table contains a copy of the first group (i.e., the group starting
  // at entry 0) after the first |capacity| entries of the ctrl table. This
  // means that the ctrl table always has size |capacity| + |kGroupWidth|.
  // However, note that we may have |capacity| < |kGroupWidth|. For example, if
  // Capacity() == 8 and |kGroupWidth| == 16, then ctrl[0] is copied to ctrl[8],
  // ctrl[1] to ctrl[9], etc. In this case, ctrl[16] to ctrl[23] remain unused,
  // which means that their values are always Ctrl::kEmpty.
  // We achieve the necessary copying without branching here using some bit
  // magic: We set {copy_entry = entry} in those cases where we don't actually
  // have to perform a copy (meaning that we just repeat the {ctrl[entry] = h}
  // from above). If we do need to do some actual copying, we set {copy_entry =
  // Capacity() + entry}.

  int mask = capacity - 1;
  int copy_entry =
      ((entry - Group::kWidth) & mask) + 1 + ((Group::kWidth - 1) & mask);
  // Sanity-check the bit magic: only entries within the first group are
  // actually mirrored to the end of the table.
  DCHECK_IMPLIES(entry < static_cast<int>(Group::kWidth),
                 copy_entry == capacity + entry);
  DCHECK_IMPLIES(entry >= static_cast<int>(Group::kWidth), copy_entry == entry);
  ctrl[copy_entry] = h;
}
385
// Returns the index of the first empty bucket found while probing for |hash|.
// Deliberately skips kDeleted buckets; see the comment in AddInternal.
// static
inline int SwissNameDictionary::FindFirstEmpty(uint32_t hash) {
  // See SwissNameDictionary::FindEntry for description of probing algorithm.

  auto seq = probe(hash, Capacity());
  while (true) {
    Group g{CtrlTable() + seq.offset()};
    auto mask = g.MatchEmpty();
    if (mask) {
      // Note that picking the lowest bit set here means using the leftmost
      // empty bucket in the group. Here, "left" means smaller entry/bucket
      // index.
      return seq.offset(mask.LowestBitSet());
    }
    seq.next();
    // The load factor guarantees an empty bucket is always found before the
    // probe sequence wraps; see the matching DCHECK in FindEntry.
    DCHECK_LT(seq.index(), Capacity());
  }
}
404
SetMetaTableField(int field_index,int value)405 void SwissNameDictionary::SetMetaTableField(int field_index, int value) {
406 // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
407 // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
408 // constants.
409 int capacity = Capacity();
410 ByteArray meta_table = this->meta_table();
411 if (capacity <= kMax1ByteMetaTableCapacity) {
412 SetMetaTableField<uint8_t>(meta_table, field_index, value);
413 } else if (capacity <= kMax2ByteMetaTableCapacity) {
414 SetMetaTableField<uint16_t>(meta_table, field_index, value);
415 } else {
416 SetMetaTableField<uint32_t>(meta_table, field_index, value);
417 }
418 }
419
GetMetaTableField(int field_index)420 int SwissNameDictionary::GetMetaTableField(int field_index) {
421 // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
422 // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
423 // constants.
424 int capacity = Capacity();
425 ByteArray meta_table = this->meta_table();
426 if (capacity <= kMax1ByteMetaTableCapacity) {
427 return GetMetaTableField<uint8_t>(meta_table, field_index);
428 } else if (capacity <= kMax2ByteMetaTableCapacity) {
429 return GetMetaTableField<uint16_t>(meta_table, field_index);
430 } else {
431 return GetMetaTableField<uint32_t>(meta_table, field_index);
432 }
433 }
434
435 // static
436 template <typename T>
SetMetaTableField(ByteArray meta_table,int field_index,int value)437 void SwissNameDictionary::SetMetaTableField(ByteArray meta_table,
438 int field_index, int value) {
439 STATIC_ASSERT((std::is_same<T, uint8_t>::value) ||
440 (std::is_same<T, uint16_t>::value) ||
441 (std::is_same<T, uint32_t>::value));
442 DCHECK_LE(value, std::numeric_limits<T>::max());
443 DCHECK_LT(meta_table.GetDataStartAddress() + field_index * sizeof(T),
444 meta_table.GetDataEndAddress());
445 T* raw_data = reinterpret_cast<T*>(meta_table.GetDataStartAddress());
446 raw_data[field_index] = value;
447 }
448
449 // static
450 template <typename T>
GetMetaTableField(ByteArray meta_table,int field_index)451 int SwissNameDictionary::GetMetaTableField(ByteArray meta_table,
452 int field_index) {
453 STATIC_ASSERT((std::is_same<T, uint8_t>::value) ||
454 (std::is_same<T, uint16_t>::value) ||
455 (std::is_same<T, uint32_t>::value));
456 DCHECK_LT(meta_table.GetDataStartAddress() + field_index * sizeof(T),
457 meta_table.GetDataEndAddress());
458 T* raw_data = reinterpret_cast<T*>(meta_table.GetDataStartAddress());
459 return raw_data[field_index];
460 }
461
MetaTableSizePerEntryFor(int capacity)462 constexpr int SwissNameDictionary::MetaTableSizePerEntryFor(int capacity) {
463 DCHECK(IsValidCapacity(capacity));
464
465 // See the STATIC_ASSERTs on |kMax1ByteMetaTableCapacity| and
466 // |kMax2ByteMetaTableCapacity| in the .cc file for an explanation of these
467 // constants.
468 if (capacity <= kMax1ByteMetaTableCapacity) {
469 return sizeof(uint8_t);
470 } else if (capacity <= kMax2ByteMetaTableCapacity) {
471 return sizeof(uint16_t);
472 } else {
473 return sizeof(uint32_t);
474 }
475 }
476
MetaTableSizeFor(int capacity)477 constexpr int SwissNameDictionary::MetaTableSizeFor(int capacity) {
478 DCHECK(IsValidCapacity(capacity));
479
480 int per_entry_size = MetaTableSizePerEntryFor(capacity);
481
482 // The enumeration table only needs to have as many slots as there can be
483 // present + deleted entries in the hash table (= maximum load factor *
484 // capactiy). Two more slots to store the number of present and deleted
485 // entries.
486 return per_entry_size * (MaxUsableCapacity(capacity) + 2);
487 }
488
IsKey(ReadOnlyRoots roots,Object key_candidate)489 bool SwissNameDictionary::IsKey(ReadOnlyRoots roots, Object key_candidate) {
490 return key_candidate != roots.the_hole_value();
491 }
492
ToKey(ReadOnlyRoots roots,int entry,Object * out_key)493 bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, int entry,
494 Object* out_key) {
495 Object k = KeyAt(entry);
496 if (!IsKey(roots, k)) return false;
497 *out_key = k;
498 return true;
499 }
500
ToKey(ReadOnlyRoots roots,InternalIndex entry,Object * out_key)501 bool SwissNameDictionary::ToKey(ReadOnlyRoots roots, InternalIndex entry,
502 Object* out_key) {
503 return ToKey(roots, entry.as_int(), out_key);
504 }
505
// Adds |key| -> |value| with |details| to |original_table|, growing it first
// if the maximum load factor has been reached. The key must not be present
// yet. Returns the table to use from now on, which may differ from
// |original_table| if growing occurred; if |entry_out| is non-null, it
// receives the bucket of the new entry.
// static
template <typename IsolateT>
Handle<SwissNameDictionary> SwissNameDictionary::Add(
    IsolateT* isolate, Handle<SwissNameDictionary> original_table,
    Handle<Name> key, Handle<Object> value, PropertyDetails details,
    InternalIndex* entry_out) {
  DCHECK(original_table->FindEntry(isolate, *key).is_not_found());

  Handle<SwissNameDictionary> table = EnsureGrowable(isolate, original_table);

  // Read the counters only after (possibly) growing: |table| may be a
  // different object than |original_table|.
  int nof = table->NumberOfElements();
  int nod = table->NumberOfDeletedElements();
  // The new entry is appended at the end of the enumeration order.
  int new_enum_index = nof + nod;

  int new_entry = table->AddInternal(*key, *value, details);

  table->SetNumberOfElements(nof + 1);
  table->SetEntryForEnumerationIndex(new_enum_index, new_entry);

  if (entry_out) {
    *entry_out = InternalIndex(new_entry);
  }

  return table;
}
531
// Writes |key|, |value| and |details| into the first empty bucket in |key|'s
// probe sequence and returns that bucket's index. Does not update the element
// count or the enumeration table; callers (e.g. Add) are responsible for
// that.
int SwissNameDictionary::AddInternal(Name key, Object value,
                                     PropertyDetails details) {
  // Raw Name/Object values are held across the writes below, so GC must be
  // ruled out for the duration of this function.
  DisallowHeapAllocation no_gc;

  DCHECK(key.IsUniqueName());
  DCHECK_LE(UsedCapacity(), MaxUsableCapacity(Capacity()));

  uint32_t hash = key.hash();

  // For now we don't re-use deleted buckets (due to enumeration table
  // complications), which is why we only look for empty buckets here, not
  // deleted ones.
  int target = FindFirstEmpty(hash);

  SetCtrl(target, swiss_table::H2(hash));
  SetKey(target, key);
  ValueAtPut(target, value);
  DetailsAtPut(target, details);

  // Note that we do not update the number of elements or the enumeration table
  // in this function.

  return target;
}
556
// One-time initialization of a freshly allocated table: sets capacity and
// hash, clears the ctrl table to kEmpty, fills the data table with the_hole,
// installs |meta_table| and zeroes the element counters.
template <typename IsolateT>
void SwissNameDictionary::Initialize(IsolateT* isolate, ByteArray meta_table,
                                     int capacity) {
  DCHECK(IsValidCapacity(capacity));
  DisallowHeapAllocation no_gc;
  ReadOnlyRoots roots(isolate);

  SetCapacity(capacity);
  SetHash(PropertyArray::kNoHashSentinel);

  memset(CtrlTable(), Ctrl::kEmpty, CtrlTableSize(capacity));

  MemsetTagged(RawField(DataTableStartOffset()), roots.the_hole_value(),
               capacity * kDataTableEntryCount);

  set_meta_table(meta_table);

  SetNumberOfElements(0);
  SetNumberOfDeletedElements(0);

  // We leave the enumeration table and the PropertyDetails table
  // uninitialized.
}
579
IndexIterator(Handle<SwissNameDictionary> dict,int start)580 SwissNameDictionary::IndexIterator::IndexIterator(
581 Handle<SwissNameDictionary> dict, int start)
582 : enum_index_{start}, dict_{dict} {
583 if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict.is_null()) {
584 used_capacity_ = 0;
585 } else {
586 used_capacity_ = dict->UsedCapacity();
587 }
588 }
589
590 SwissNameDictionary::IndexIterator&
591 SwissNameDictionary::IndexIterator::operator++() {
592 DCHECK_LT(enum_index_, used_capacity_);
593 ++enum_index_;
594 return *this;
595 }
596
597 bool SwissNameDictionary::IndexIterator::operator==(
598 const SwissNameDictionary::IndexIterator& b) const {
599 DCHECK_LE(enum_index_, used_capacity_);
600 DCHECK_LE(b.enum_index_, used_capacity_);
601 DCHECK(dict_.equals(b.dict_));
602
603 return this->enum_index_ == b.enum_index_;
604 }
605
606 bool SwissNameDictionary::IndexIterator::operator!=(
607 const IndexIterator& b) const {
608 return !(*this == b);
609 }
610
611 InternalIndex SwissNameDictionary::IndexIterator::operator*() {
612 DCHECK_LE(enum_index_, used_capacity_);
613
614 if (enum_index_ == used_capacity_) return InternalIndex::NotFound();
615
616 return InternalIndex(dict_->EntryForEnumerationIndex(enum_index_));
617 }
618
IndexIterable(Handle<SwissNameDictionary> dict)619 SwissNameDictionary::IndexIterable::IndexIterable(
620 Handle<SwissNameDictionary> dict)
621 : dict_{dict} {}
622
begin()623 SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::begin() {
624 return IndexIterator(dict_, 0);
625 }
626
end()627 SwissNameDictionary::IndexIterator SwissNameDictionary::IndexIterable::end() {
628 if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && dict_.is_null()) {
629 return IndexIterator(dict_, 0);
630 } else {
631 DCHECK(!dict_.is_null());
632 return IndexIterator(dict_, dict_->UsedCapacity());
633 }
634 }
635
// Returns an iterable visiting the entries in enumeration order.
SwissNameDictionary::IndexIterable
SwissNameDictionary::IterateEntriesOrdered() {
  // If we are supposed to iterate the empty dictionary (which is non-writable)
  // and pointer compression with a per-Isolate cage is disabled, we have no
  // simple way to get the isolate, which we would need to create a handle.
  // TODO(emrich): Consider always using roots.empty_swiss_dictionary_handle()
  // in the condition once this function gets Isolate as a parameter in order to
  // avoid empty dict checks.
  if (!COMPRESS_POINTERS_IN_ISOLATE_CAGE_BOOL && Capacity() == 0)
    return IndexIterable(Handle<SwissNameDictionary>::null());

  Isolate* isolate;
  GetIsolateFromHeapObject(*this, &isolate);
  // We want to be able to iterate read-only objects as well, so we cannot use
  // an isolate-dependent handle scope check here; the call above must succeed.
  DCHECK_NE(isolate, nullptr);
  return IndexIterable(handle(*this, isolate));
}
652
IterateEntries()653 SwissNameDictionary::IndexIterable SwissNameDictionary::IterateEntries() {
654 return IterateEntriesOrdered();
655 }
656
SetHash(int32_t hash)657 void SwissNameDictionary::SetHash(int32_t hash) {
658 WriteField(PrefixOffset(), hash);
659 }
660
Hash()661 int SwissNameDictionary::Hash() { return ReadField<int32_t>(PrefixOffset()); }
662
// Computes the largest capacity such that the table proper plus its meta
// table together still fit into FixedArray::kMaxSize.
// static
constexpr int SwissNameDictionary::MaxCapacity() {
  // Fixed overhead: the table's own header fields plus the meta table's
  // ByteArray header and the two (4-byte) element counters.
  int const_size =
      DataTableStartOffset() + ByteArray::kHeaderSize +
      // Size for present and deleted element count at max capacity:
      2 * sizeof(uint32_t);
  int per_entry_size =
      // size of data table entries:
      kDataTableEntryCount * kTaggedSize +
      // ctrl table entry size:
      kOneByteSize +
      // PropertyDetails table entry size:
      kOneByteSize +
      // Enumeration table entry size at maximum capacity:
      sizeof(uint32_t);

  int result = (FixedArray::kMaxSize - const_size) / per_entry_size;
  // The capacity must remain representable as a Smi.
  DCHECK_GE(Smi::kMaxValue, result);

  return result;
}
684
685 // static
PrefixOffset()686 constexpr int SwissNameDictionary::PrefixOffset() {
687 return HeapObject::kHeaderSize;
688 }
689
690 // static
CapacityOffset()691 constexpr int SwissNameDictionary::CapacityOffset() {
692 return PrefixOffset() + sizeof(uint32_t);
693 }
694
695 // static
MetaTablePointerOffset()696 constexpr int SwissNameDictionary::MetaTablePointerOffset() {
697 return CapacityOffset() + sizeof(int32_t);
698 }
699
700 // static
DataTableStartOffset()701 constexpr int SwissNameDictionary::DataTableStartOffset() {
702 return MetaTablePointerOffset() + kTaggedSize;
703 }
704
705 // static
DataTableEndOffset(int capacity)706 constexpr int SwissNameDictionary::DataTableEndOffset(int capacity) {
707 return CtrlTableStartOffset(capacity);
708 }
709
710 // static
CtrlTableStartOffset(int capacity)711 constexpr int SwissNameDictionary::CtrlTableStartOffset(int capacity) {
712 return DataTableStartOffset() + DataTableSize(capacity);
713 }
714
715 // static
PropertyDetailsTableStartOffset(int capacity)716 constexpr int SwissNameDictionary::PropertyDetailsTableStartOffset(
717 int capacity) {
718 return CtrlTableStartOffset(capacity) + CtrlTableSize(capacity);
719 }
720
721 // static
IsEmpty(ctrl_t c)722 bool SwissNameDictionary::IsEmpty(ctrl_t c) { return c == Ctrl::kEmpty; }
723
724 // static
IsFull(ctrl_t c)725 bool SwissNameDictionary::IsFull(ctrl_t c) {
726 STATIC_ASSERT(Ctrl::kEmpty < 0);
727 STATIC_ASSERT(Ctrl::kDeleted < 0);
728 STATIC_ASSERT(Ctrl::kSentinel < 0);
729 return c >= 0;
730 }
731
732 // static
IsDeleted(ctrl_t c)733 bool SwissNameDictionary::IsDeleted(ctrl_t c) { return c == Ctrl::kDeleted; }
734
735 // static
IsEmptyOrDeleted(ctrl_t c)736 bool SwissNameDictionary::IsEmptyOrDeleted(ctrl_t c) {
737 STATIC_ASSERT(Ctrl::kDeleted < Ctrl::kSentinel);
738 STATIC_ASSERT(Ctrl::kEmpty < Ctrl::kSentinel);
739 STATIC_ASSERT(Ctrl::kSentinel < 0);
740 return c < Ctrl::kSentinel;
741 }
742
// Constructs the probe sequence for |hash| over a table of |capacity|
// buckets. The sequence is driven by H1(hash) and wraps via the modulo mask
// (capacity - 1).
// static
swiss_table::ProbeSequence<SwissNameDictionary::kGroupWidth>
SwissNameDictionary::probe(uint32_t hash, int capacity) {
  // If |capacity| is 0, we must produce 1 here, such that the - 1 below
  // yields 0, which is the correct modulo mask for a table of capacity 0.
  // (The |= trick relies on bool converting to 0/1.)
  int non_zero_capacity = capacity | (capacity == 0);
  return swiss_table::ProbeSequence<SwissNameDictionary::kGroupWidth>(
      swiss_table::H1(hash), static_cast<uint32_t>(non_zero_capacity - 1));
}
752
// Defines the |meta_table| accessors. The check guarantees that any ByteArray
// installed as meta table is large enough to hold at least the two counter
// fields that precede the enumeration data.
ACCESSORS_CHECKED2(SwissNameDictionary, meta_table, ByteArray,
                   MetaTablePointerOffset(), true,
                   value.length() >= kMetaTableEnumerationDataStartIndex)
756
757 } // namespace internal
758 } // namespace v8
759
760 #endif // V8_OBJECTS_SWISS_NAME_DICTIONARY_INL_H_
761