// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Only including the -inl.h file directly makes the linter complain.
#include "src/objects/swiss-name-dictionary.h"

#include "src/heap/heap-inl.h"
#include "src/objects/swiss-name-dictionary-inl.h"

namespace v8 {
namespace internal {

14 // static
DeleteEntry(Isolate * isolate,Handle<SwissNameDictionary> table,InternalIndex entry)15 Handle<SwissNameDictionary> SwissNameDictionary::DeleteEntry(
16     Isolate* isolate, Handle<SwissNameDictionary> table, InternalIndex entry) {
17   // GetCtrl() does the bounds check.
18   DCHECK(IsFull(table->GetCtrl(entry.as_int())));
19 
20   int i = entry.as_int();
21 
22   table->SetCtrl(i, Ctrl::kDeleted);
23   table->ClearDataTableEntry(isolate, i);
24   // We leave the PropertyDetails unchanged because they are not relevant for
25   // GC.
26 
27   int nof = table->NumberOfElements();
28   table->SetNumberOfElements(nof - 1);
29   int nod = table->NumberOfDeletedElements();
30   table->SetNumberOfDeletedElements(nod + 1);
31 
32   // TODO(v8:11388) Abseil's flat_hash_map doesn't shrink on deletion, but may
33   // decide on addition to do an in-place rehash to remove deleted elements. We
34   // shrink on deletion here to follow what NameDictionary and
35   // OrderedNameDictionary do. We should investigate which approach works
36   // better.
37   return Shrink(isolate, table);
38 }
39 
40 // static
41 template <typename IsolateT>
Rehash(IsolateT * isolate,Handle<SwissNameDictionary> table,int new_capacity)42 Handle<SwissNameDictionary> SwissNameDictionary::Rehash(
43     IsolateT* isolate, Handle<SwissNameDictionary> table, int new_capacity) {
44   DCHECK(IsValidCapacity(new_capacity));
45   DCHECK_LE(table->NumberOfElements(), MaxUsableCapacity(new_capacity));
46   ReadOnlyRoots roots(isolate);
47 
48   Handle<SwissNameDictionary> new_table =
49       isolate->factory()->NewSwissNameDictionaryWithCapacity(
50           new_capacity, Heap::InYoungGeneration(*table) ? AllocationType::kYoung
51                                                         : AllocationType::kOld);
52 
53   DisallowHeapAllocation no_gc;
54 
55   int new_enum_index = 0;
56   new_table->SetNumberOfElements(table->NumberOfElements());
57   for (int enum_index = 0; enum_index < table->UsedCapacity(); ++enum_index) {
58     int entry = table->EntryForEnumerationIndex(enum_index);
59 
60     Object key;
61 
62     if (table->ToKey(roots, entry, &key)) {
63       Object value = table->ValueAtRaw(entry);
64       PropertyDetails details = table->DetailsAt(entry);
65 
66       int new_entry = new_table->AddInternal(Name::cast(key), value, details);
67 
68       // TODO(v8::11388) Investigate ways of hoisting the branching needed to
69       // select the correct meta table entry size (based on the capacity of the
70       // table) out of the loop.
71       new_table->SetEntryForEnumerationIndex(new_enum_index, new_entry);
72       ++new_enum_index;
73     }
74   }
75 
76   new_table->SetHash(table->Hash());
77   return new_table;
78 }
79 
EqualsForTesting(SwissNameDictionary other)80 bool SwissNameDictionary::EqualsForTesting(SwissNameDictionary other) {
81   if (Capacity() != other.Capacity() ||
82       NumberOfElements() != other.NumberOfElements() ||
83       NumberOfDeletedElements() != other.NumberOfDeletedElements() ||
84       Hash() != other.Hash()) {
85     return false;
86   }
87 
88   for (int i = 0; i < Capacity() + kGroupWidth; i++) {
89     if (CtrlTable()[i] != other.CtrlTable()[i]) {
90       return false;
91     }
92   }
93   for (int i = 0; i < Capacity(); i++) {
94     if (KeyAt(i) != other.KeyAt(i) || ValueAtRaw(i) != other.ValueAtRaw(i)) {
95       return false;
96     }
97     if (IsFull(GetCtrl(i))) {
98       if (DetailsAt(i) != other.DetailsAt(i)) return false;
99     }
100   }
101   for (int i = 0; i < UsedCapacity(); i++) {
102     if (EntryForEnumerationIndex(i) != other.EntryForEnumerationIndex(i)) {
103       return false;
104     }
105   }
106   return true;
107 }
108 
109 // static
ShallowCopy(Isolate * isolate,Handle<SwissNameDictionary> table)110 Handle<SwissNameDictionary> SwissNameDictionary::ShallowCopy(
111     Isolate* isolate, Handle<SwissNameDictionary> table) {
112   // TODO(v8:11388) Consider doing some cleanup during copying: For example, we
113   // could turn kDeleted into kEmpty in certain situations. But this would
114   // require tidying up the enumeration table in a similar fashion as would be
115   // required when trying to re-use deleted entries.
116 
117   if (table->Capacity() == 0) {
118     return table;
119   }
120 
121   int capacity = table->Capacity();
122   int used_capacity = table->UsedCapacity();
123 
124   Handle<SwissNameDictionary> new_table =
125       isolate->factory()->NewSwissNameDictionaryWithCapacity(
126           capacity, Heap::InYoungGeneration(*table) ? AllocationType::kYoung
127                                                     : AllocationType::kOld);
128 
129   new_table->SetHash(table->Hash());
130 
131   DisallowGarbageCollection no_gc;
132   WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
133 
134   if (mode == WriteBarrierMode::SKIP_WRITE_BARRIER) {
135     // Copy data table and ctrl table, which are stored next to each other.
136     void* original_start =
137         reinterpret_cast<void*>(table->field_address(DataTableStartOffset()));
138     void* new_table_start = reinterpret_cast<void*>(
139         new_table->field_address(DataTableStartOffset()));
140     size_t bytes_to_copy = DataTableSize(capacity) + CtrlTableSize(capacity);
141     DCHECK(DataTableEndOffset(capacity) == CtrlTableStartOffset(capacity));
142     MemCopy(new_table_start, original_start, bytes_to_copy);
143   } else {
144     DCHECK_EQ(UPDATE_WRITE_BARRIER, mode);
145 
146     // We may have to trigger write barriers when copying the data table.
147     for (int i = 0; i < capacity; ++i) {
148       Object key = table->KeyAt(i);
149       Object value = table->ValueAtRaw(i);
150 
151       // Cannot use SetKey/ValueAtPut because they don't accept the hole as data
152       // to store.
153       new_table->StoreToDataTable(i, kDataTableKeyEntryIndex, key);
154       new_table->StoreToDataTable(i, kDataTableValueEntryIndex, value);
155     }
156 
157     void* original_ctrl_table = table->CtrlTable();
158     void* new_ctrl_table = new_table->CtrlTable();
159     MemCopy(new_ctrl_table, original_ctrl_table, CtrlTableSize(capacity));
160   }
161 
162   // PropertyDetails table may contain uninitialized data for unused slots.
163   for (int i = 0; i < capacity; ++i) {
164     if (IsFull(table->GetCtrl(i))) {
165       new_table->DetailsAtPut(i, table->DetailsAt(i));
166     }
167   }
168 
169   // Meta table is only initialized for the first 2 + UsedCapacity() entries,
170   // where size of each entry depends on table capacity.
171   int size_per_meta_table_entry = MetaTableSizePerEntryFor(capacity);
172   int meta_table_used_bytes = (2 + used_capacity) * size_per_meta_table_entry;
173   new_table->meta_table().copy_in(0, table->meta_table().GetDataStartAddress(),
174                                   meta_table_used_bytes);
175 
176   return new_table;
177 }
178 
179 // static
Shrink(Isolate * isolate,Handle<SwissNameDictionary> table)180 Handle<SwissNameDictionary> SwissNameDictionary::Shrink(
181     Isolate* isolate, Handle<SwissNameDictionary> table) {
182   // TODO(v8:11388) We're using the same logic to decide whether or not to
183   // shrink as OrderedNameDictionary and NameDictionary here. We should compare
184   // this with the logic used by Abseil's flat_hash_map, which has a heuristic
185   // for triggering an (in-place) rehash on addition, but never shrinks the
186   // table. Abseil's heuristic doesn't take the numbere of deleted elements into
187   // account, because it doesn't track that.
188 
189   int nof = table->NumberOfElements();
190   int capacity = table->Capacity();
191   if (nof >= (capacity >> 2)) return table;
192   int new_capacity = std::max(capacity / 2, kInitialCapacity);
193   return Rehash(isolate, table, new_capacity);
194 }
195 
196 // TODO(v8::11388) Copying all data into a std::vector and then re-adding into
197 // the table doesn't seem like a good algorithm. Abseil's Swiss Tables come with
198 // a clever algorithm for re-hashing in place: It first changes the control
199 // table, effectively changing the roles of full, empty and deleted buckets. It
200 // then moves each entry to its new bucket by swapping entries (see
201 // drop_deletes_without_resize in Abseil's raw_hash_set.h). This algorithm could
202 // generally adapted to work on our insertion order preserving implementation,
203 // too. However, it would require a mapping from hash table buckets back to
204 // enumeration indices. This could either be be created in this function
205 // (requiring a vector with Capacity() entries and a separate pass over the
206 // enumeration table) or by creating this backwards mapping ahead of time and
207 // storing it somewhere in the main table or the meta table, for those
208 // SwissNameDictionaries that we know will be in-place rehashed, most notably
209 // those stored in the snapshot.
210 template <typename IsolateT>
Rehash(IsolateT * isolate)211 void SwissNameDictionary::Rehash(IsolateT* isolate) {
212   DisallowHeapAllocation no_gc;
213 
214   struct Entry {
215     Name key;
216     Object value;
217     PropertyDetails details;
218   };
219 
220   if (Capacity() == 0) return;
221 
222   Entry dummy{Name(), Object(), PropertyDetails::Empty()};
223   std::vector<Entry> data(NumberOfElements(), dummy);
224 
225   ReadOnlyRoots roots(isolate);
226   int data_index = 0;
227   for (int enum_index = 0; enum_index < UsedCapacity(); ++enum_index) {
228     int entry = EntryForEnumerationIndex(enum_index);
229     Object key;
230     if (!ToKey(roots, entry, &key)) continue;
231 
232     data[data_index++] =
233         Entry{Name::cast(key), ValueAtRaw(entry), DetailsAt(entry)};
234   }
235 
236   Initialize(isolate, meta_table(), Capacity());
237 
238   int new_enum_index = 0;
239   SetNumberOfElements(static_cast<int>(data.size()));
240   for (Entry& e : data) {
241     int new_entry = AddInternal(e.key, e.value, e.details);
242 
243     // TODO(v8::11388) Investigate ways of hoisting the branching needed to
244     // select the correct meta table entry size (based on the capacity of the
245     // table) out of the loop.
246     SetEntryForEnumerationIndex(new_enum_index, new_entry);
247     ++new_enum_index;
248   }
249 }
250 
251 // TODO(emrich,v8:11388): This is almost an identical copy of
252 // HashTable<..>::NumberOfEnumerableProperties. Consolidate both versions
253 // elsewhere (e.g., hash-table-utils)?
NumberOfEnumerableProperties()254 int SwissNameDictionary::NumberOfEnumerableProperties() {
255   ReadOnlyRoots roots = this->GetReadOnlyRoots();
256   int result = 0;
257   for (InternalIndex i : this->IterateEntries()) {
258     Object k;
259     if (!this->ToKey(roots, i, &k)) continue;
260     if (k.FilterKey(ENUMERABLE_STRINGS)) continue;
261     PropertyDetails details = this->DetailsAt(i);
262     PropertyAttributes attr = details.attributes();
263     if ((attr & ONLY_ENUMERABLE) == 0) result++;
264   }
265   return result;
266 }
267 
268 // TODO(emrich, v8:11388): This is almost an identical copy of
269 // Dictionary<..>::SlowReverseLookup. Consolidate both versions elsewhere (e.g.,
270 // hash-table-utils)?
SlowReverseLookup(Isolate * isolate,Object value)271 Object SwissNameDictionary::SlowReverseLookup(Isolate* isolate, Object value) {
272   ReadOnlyRoots roots(isolate);
273   for (InternalIndex i : IterateEntries()) {
274     Object k;
275     if (!ToKey(roots, i, &k)) continue;
276     Object e = this->ValueAt(i);
277     if (e == value) return k;
278   }
279   return roots.undefined_value();
280 }
281 
282 // The largest value we ever have to store in the enumeration table is
283 // Capacity() - 1. The largest value we ever have to store for the present or
284 // deleted element count is MaxUsableCapacity(Capacity()). All data in the
285 // meta table is unsigned. Using this, we verify the values of the constants
286 // |kMax1ByteMetaTableCapacity| and |kMax2ByteMetaTableCapacity|.
287 STATIC_ASSERT(SwissNameDictionary::kMax1ByteMetaTableCapacity - 1 <=
288               std::numeric_limits<uint8_t>::max());
289 STATIC_ASSERT(SwissNameDictionary::MaxUsableCapacity(
290                   SwissNameDictionary::kMax1ByteMetaTableCapacity) <=
291               std::numeric_limits<uint8_t>::max());
292 STATIC_ASSERT(SwissNameDictionary::kMax2ByteMetaTableCapacity - 1 <=
293               std::numeric_limits<uint16_t>::max());
294 STATIC_ASSERT(SwissNameDictionary::MaxUsableCapacity(
295                   SwissNameDictionary::kMax2ByteMetaTableCapacity) <=
296               std::numeric_limits<uint16_t>::max());
297 
298 template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
299     Isolate* isolate, ByteArray meta_table, int capacity);
300 template V8_EXPORT_PRIVATE void SwissNameDictionary::Initialize(
301     LocalIsolate* isolate, ByteArray meta_table, int capacity);
302 
303 template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
304 SwissNameDictionary::Rehash(LocalIsolate* isolate,
305                             Handle<SwissNameDictionary> table,
306                             int new_capacity);
307 template V8_EXPORT_PRIVATE Handle<SwissNameDictionary>
308 SwissNameDictionary::Rehash(Isolate* isolate, Handle<SwissNameDictionary> table,
309                             int new_capacity);
310 
311 template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(
312     LocalIsolate* isolate);
313 template V8_EXPORT_PRIVATE void SwissNameDictionary::Rehash(Isolate* isolate);
314 
315 constexpr int SwissNameDictionary::kInitialCapacity;
316 constexpr int SwissNameDictionary::kGroupWidth;
317 
318 }  // namespace internal
319 }  // namespace v8
320