// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/deserializer.h"

#include "src/base/logging.h"
#include "src/codegen/assembler-inl.h"
#include "src/common/assert-scope.h"
#include "src/common/external-pointer.h"
#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/heap/heap-write-barrier.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/logging/log.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/cell-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/objects.h"
#include "src/objects/slots.h"
#include "src/objects/smi.h"
#include "src/objects/string.h"
#include "src/roots/roots.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/references.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-data.h"
#include "src/snapshot/snapshot.h"
#include "src/tracing/trace-event.h"
#include "src/tracing/traced-value.h"
#include "src/utils/memcopy.h"

namespace v8 {
namespace internal {

// A SlotAccessor for a slot in a HeapObject, which abstracts the slot
// operations done by the deserializer in a way which is GC-safe. In particular,
// rather than an absolute slot address, this accessor holds a Handle to the
// HeapObject, which is updated if the HeapObject moves.
class SlotAccessorForHeapObject {
 public:
  static SlotAccessorForHeapObject ForSlotIndex(Handle<HeapObject> object,
                                                int index) {
    return SlotAccessorForHeapObject(object, index * kTaggedSize);
  }
  static SlotAccessorForHeapObject ForSlotOffset(Handle<HeapObject> object,
                                                 int offset) {
    return SlotAccessorForHeapObject(object, offset);
  }

  MaybeObjectSlot slot() const { return object_->RawMaybeWeakField(offset_); }
  Handle<HeapObject> object() const { return object_; }
  int offset() const { return offset_; }

  // Writes the given value to this slot, optionally with an offset (e.g. for
  // repeat writes). Returns the number of slots written (which is one).
  int Write(MaybeObject value, int slot_offset = 0) {
    MaybeObjectSlot current_slot = slot() + slot_offset;
    current_slot.Relaxed_Store(value);
    WriteBarrier::Marking(*object_, current_slot, value);
    // No need for a generational write barrier.
    DCHECK(!Heap::InYoungGeneration(value));
    return 1;
  }
  int Write(HeapObject value, HeapObjectReferenceType ref_type,
            int slot_offset = 0) {
    return Write(HeapObjectReference::From(value, ref_type), slot_offset);
  }
  int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
            int slot_offset = 0) {
    return Write(*value, ref_type, slot_offset);
  }

  // Same as Write, but additionally with a generational barrier.
  int WriteWithGenerationalBarrier(MaybeObject value) {
    MaybeObjectSlot current_slot = slot();
    current_slot.Relaxed_Store(value);
    WriteBarrier::Marking(*object_, current_slot, value);
    if (Heap::InYoungGeneration(value)) {
      GenerationalBarrier(*object_, current_slot, value);
    }
    return 1;
  }
  int WriteWithGenerationalBarrier(HeapObject value,
                                   HeapObjectReferenceType ref_type) {
    return WriteWithGenerationalBarrier(
        HeapObjectReference::From(value, ref_type));
  }
  int WriteWithGenerationalBarrier(Handle<HeapObject> value,
                                   HeapObjectReferenceType ref_type) {
    return WriteWithGenerationalBarrier(*value, ref_type);
  }

 private:
  SlotAccessorForHeapObject(Handle<HeapObject> object, int offset)
      : object_(object), offset_(offset) {}

  const Handle<HeapObject> object_;
  const int offset_;
};

// A SlotAccessor for absolute full slot addresses.
class SlotAccessorForRootSlots {
 public:
  explicit SlotAccessorForRootSlots(FullMaybeObjectSlot slot) : slot_(slot) {}

  FullMaybeObjectSlot slot() const { return slot_; }
  Handle<HeapObject> object() const { UNREACHABLE(); }
  int offset() const { UNREACHABLE(); }

  // Writes the given value to this slot, optionally with an offset (e.g. for
  // repeat writes). Returns the number of slots written (which is one).
  int Write(MaybeObject value, int slot_offset = 0) {
    FullMaybeObjectSlot current_slot = slot() + slot_offset;
    current_slot.Relaxed_Store(value);
    return 1;
  }
  int Write(HeapObject value, HeapObjectReferenceType ref_type,
            int slot_offset = 0) {
    return Write(HeapObjectReference::From(value, ref_type), slot_offset);
  }
  int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
            int slot_offset = 0) {
    return Write(*value, ref_type, slot_offset);
  }

  int WriteWithGenerationalBarrier(MaybeObject value) { return Write(value); }
  int WriteWithGenerationalBarrier(HeapObject value,
                                   HeapObjectReferenceType ref_type) {
    return WriteWithGenerationalBarrier(
        HeapObjectReference::From(value, ref_type));
  }
  int WriteWithGenerationalBarrier(Handle<HeapObject> value,
                                   HeapObjectReferenceType ref_type) {
    return WriteWithGenerationalBarrier(*value, ref_type);
  }

 private:
  const FullMaybeObjectSlot slot_;
};

// A SlotAccessor for creating a Handle, which saves a Handle allocation when
// a Handle already exists.
class SlotAccessorForHandle {
 public:
  SlotAccessorForHandle(Handle<HeapObject>* handle, Isolate* isolate)
      : handle_(handle), isolate_(isolate) {}

  MaybeObjectSlot slot() const { UNREACHABLE(); }
  Handle<HeapObject> object() const { UNREACHABLE(); }
  int offset() const { UNREACHABLE(); }

  int Write(MaybeObject value, int slot_offset = 0) { UNREACHABLE(); }
  int Write(HeapObject value, HeapObjectReferenceType ref_type,
            int slot_offset = 0) {
    DCHECK_EQ(slot_offset, 0);
    DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
    *handle_ = handle(value, isolate_);
    return 1;
  }
  int Write(Handle<HeapObject> value, HeapObjectReferenceType ref_type,
            int slot_offset = 0) {
    DCHECK_EQ(slot_offset, 0);
    DCHECK_EQ(ref_type, HeapObjectReferenceType::STRONG);
    *handle_ = value;
    return 1;
  }

  int WriteWithGenerationalBarrier(HeapObject value,
                                   HeapObjectReferenceType ref_type) {
    return Write(value, ref_type);
  }
  int WriteWithGenerationalBarrier(Handle<HeapObject> value,
                                   HeapObjectReferenceType ref_type) {
    return Write(value, ref_type);
  }

 private:
  Handle<HeapObject>* handle_;
  Isolate* isolate_;
};

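// WriteAddress and WriteExternalPointer copy a raw, untagged payload into the
// destination slot and return the number of slots (of TSlot's size) that the
// payload occupies, so that the caller's read loop can advance past it.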
template <typename TSlot>
int Deserializer::WriteAddress(TSlot dest, Address value) {
  DCHECK(!next_reference_is_weak_);
  memcpy(dest.ToVoidPtr(), &value, kSystemPointerSize);
  STATIC_ASSERT(IsAligned(kSystemPointerSize, TSlot::kSlotDataSize));
  return (kSystemPointerSize / TSlot::kSlotDataSize);
}

template <typename TSlot>
int Deserializer::WriteExternalPointer(TSlot dest, Address value,
                                       ExternalPointerTag tag) {
  DCHECK(!next_reference_is_weak_);
  InitExternalPointerField(dest.address(), isolate(), value, tag);
  STATIC_ASSERT(IsAligned(kExternalPointerSize, TSlot::kSlotDataSize));
  return (kExternalPointerSize / TSlot::kSlotDataSize);
}

Deserializer::Deserializer(Isolate* isolate, Vector<const byte> payload,
                           uint32_t magic_number, bool deserializing_user_code,
                           bool can_rehash)
    : isolate_(isolate),
      source_(payload),
      magic_number_(magic_number),
      deserializing_user_code_(deserializing_user_code),
      can_rehash_(can_rehash) {
  DCHECK_NOT_NULL(isolate);
  isolate_->RegisterDeserializerStarted();

  // We start the indices here at 1, so that we can distinguish between an
  // actual index and a nullptr (serialized as kNullRefSentinel) in a
  // deserialized object requiring fix-up.
  STATIC_ASSERT(kNullRefSentinel == 0);
  backing_stores_.push_back({});

#ifdef DEBUG
  num_api_references_ = 0;
  // The read-only deserializer is run by read-only heap set-up before the
  // heap is fully set up. External reference table relies on a few parts of
  // this set-up (like old-space), so it may be uninitialized at this point.
  if (isolate->isolate_data()->external_reference_table()->is_initialized()) {
    // Count the number of external references registered through the API.
    if (isolate->api_external_references() != nullptr) {
      while (isolate->api_external_references()[num_api_references_] != 0) {
        num_api_references_++;
      }
    }
  }
#endif  // DEBUG
  CHECK_EQ(magic_number_, SerializedData::kMagicNumber);
}

void Deserializer::Rehash() {
  DCHECK(can_rehash() || deserializing_user_code());
  for (Handle<HeapObject> item : to_rehash_) {
    item->RehashBasedOnMap(isolate());
  }
}

Deserializer::~Deserializer() {
#ifdef DEBUG
  // Do not perform checks if we aborted deserialization.
  if (source_.position() == 0) return;
  // Check that we only have padding bytes remaining.
  while (source_.HasMore()) DCHECK_EQ(kNop, source_.Get());
  // Check that there are no remaining forward refs.
  DCHECK_EQ(num_unresolved_forward_refs_, 0);
  DCHECK(unresolved_forward_refs_.empty());
#endif  // DEBUG
  isolate_->RegisterDeserializerFinished();
}

// This is called on the roots.  It is the driver of the deserialization
// process.  It is also called on the body of each function.
void Deserializer::VisitRootPointers(Root root, const char* description,
                                     FullObjectSlot start, FullObjectSlot end) {
  ReadData(FullMaybeObjectSlot(start), FullMaybeObjectSlot(end));
}

void Deserializer::Synchronize(VisitorSynchronization::SyncTag tag) {
  static const byte expected = kSynchronize;
  CHECK_EQ(expected, source_.Get());
}

void Deserializer::DeserializeDeferredObjects() {
  for (int code = source_.Get(); code != kSynchronize; code = source_.Get()) {
    SnapshotSpace space = NewObject::Decode(code);
    ReadObject(space);
  }
}

void Deserializer::LogNewMapEvents() {
  DisallowGarbageCollection no_gc;
  for (Handle<Map> map : new_maps_) {
    DCHECK(FLAG_trace_maps);
    LOG(isolate(), MapCreate(*map));
    LOG(isolate(), MapDetails(*map));
  }
}

void Deserializer::WeakenDescriptorArrays() {
  DisallowHeapAllocation no_gc;
  for (Handle<DescriptorArray> descriptor_array : new_descriptor_arrays_) {
    DCHECK(descriptor_array->IsStrongDescriptorArray());
    descriptor_array->set_map(ReadOnlyRoots(isolate()).descriptor_array_map());
    WriteBarrier::Marking(*descriptor_array,
                          descriptor_array->number_of_descriptors());
  }
}

void Deserializer::LogScriptEvents(Script script) {
  DisallowGarbageCollection no_gc;
  LOG(isolate(),
      ScriptEvent(Logger::ScriptEventType::kDeserialize, script.id()));
  LOG(isolate(), ScriptDetails(script));
}

StringTableInsertionKey::StringTableInsertionKey(Handle<String> string)
    : StringTableKey(ComputeHashField(*string), string->length()),
      string_(string) {
  DCHECK(string->IsInternalizedString());
}

bool StringTableInsertionKey::IsMatch(String string) {
  // We want to compare the content of two strings here.
  return string_->SlowEquals(string);
}

Handle<String> StringTableInsertionKey::AsHandle(Isolate* isolate) {
  return string_;
}

uint32_t StringTableInsertionKey::ComputeHashField(String string) {
  // Make sure hash_field() is computed.
  string.Hash();
  return string.hash_field();
}

void Deserializer::PostProcessNewObject(Handle<Map> map, Handle<HeapObject> obj,
                                        SnapshotSpace space) {
  DCHECK_EQ(*map, obj->map());
  DisallowGarbageCollection no_gc;
  InstanceType instance_type = map->instance_type();

  if ((FLAG_rehash_snapshot && can_rehash_) || deserializing_user_code()) {
    if (InstanceTypeChecker::IsString(instance_type)) {
      // Uninitialize hash field as we need to recompute the hash.
      Handle<String> string = Handle<String>::cast(obj);
      string->set_hash_field(String::kEmptyHashField);
      // Rehash strings before read-only space is sealed. Strings outside
      // read-only space are rehashed lazily. (e.g. when rehashing dictionaries)
      if (space == SnapshotSpace::kReadOnlyHeap) {
        to_rehash_.push_back(obj);
      }
    } else if (obj->NeedsRehashing(instance_type)) {
      to_rehash_.push_back(obj);
    }
  }

  if (deserializing_user_code()) {
    if (InstanceTypeChecker::IsInternalizedString(instance_type)) {
      // Canonicalize the internalized string. If it already exists in the
      // string table, set it to forward to the existing one.
      Handle<String> string = Handle<String>::cast(obj);

      StringTableInsertionKey key(string);
      Handle<String> result =
          isolate()->string_table()->LookupKey(isolate(), &key);

      if (FLAG_thin_strings && *result != *string) {
        string->MakeThin(isolate(), *result);
        // Mutate the given object handle so that the backreference entry is
        // also updated.
        obj.PatchValue(*result);
      }
      return;
    } else if (InstanceTypeChecker::IsScript(instance_type)) {
      new_scripts_.push_back(Handle<Script>::cast(obj));
    } else if (InstanceTypeChecker::IsAllocationSite(instance_type)) {
      // We should link new allocation sites, but we can't do this immediately
      // because |AllocationSite::HasWeakNext()| internally accesses
      // |Heap::roots_| that may not have been initialized yet. So defer this to
      // |ObjectDeserializer::CommitPostProcessedObjects()|.
      new_allocation_sites_.push_back(Handle<AllocationSite>::cast(obj));
    } else {
      DCHECK(CanBeDeferred(*obj));
    }
  }

  if (InstanceTypeChecker::IsScript(instance_type)) {
    LogScriptEvents(Script::cast(*obj));
  } else if (InstanceTypeChecker::IsCode(instance_type)) {
    // We flush all code pages after deserializing the startup snapshot.
    // Hence we only remember each individual code object when deserializing
    // user code.
    if (deserializing_user_code()) {
      new_code_objects_.push_back(Handle<Code>::cast(obj));
    }
  } else if (InstanceTypeChecker::IsMap(instance_type)) {
    if (FLAG_trace_maps) {
      // Keep track of all seen Maps to log them later since they might be only
      // partially initialized at this point.
      new_maps_.push_back(Handle<Map>::cast(obj));
    }
  } else if (InstanceTypeChecker::IsAccessorInfo(instance_type)) {
#ifdef USE_SIMULATOR
    accessor_infos_.push_back(Handle<AccessorInfo>::cast(obj));
#endif
  } else if (InstanceTypeChecker::IsCallHandlerInfo(instance_type)) {
#ifdef USE_SIMULATOR
    call_handler_infos_.push_back(Handle<CallHandlerInfo>::cast(obj));
#endif
  } else if (InstanceTypeChecker::IsExternalString(instance_type)) {
    Handle<ExternalString> string = Handle<ExternalString>::cast(obj);
    uint32_t index = string->GetResourceRefForDeserialization();
    Address address =
        static_cast<Address>(isolate()->api_external_references()[index]);
    string->AllocateExternalPointerEntries(isolate());
    string->set_address_as_resource(isolate(), address);
    isolate()->heap()->UpdateExternalString(*string, 0,
                                            string->ExternalPayloadSize());
    isolate()->heap()->RegisterExternalString(*string);
  } else if (InstanceTypeChecker::IsJSDataView(instance_type)) {
    Handle<JSDataView> data_view = Handle<JSDataView>::cast(obj);
    JSArrayBuffer buffer = JSArrayBuffer::cast(data_view->buffer());
    void* backing_store = nullptr;
    uint32_t store_index = buffer.GetBackingStoreRefForDeserialization();
    if (store_index != kNullRefSentinel) {
      // The backing store of the JSArrayBuffer has not been correctly restored
      // yet, as that may trigger GC. The backing_store field currently contains
      // a numbered reference to an already deserialized backing store.
      backing_store = backing_stores_[store_index]->buffer_start();
    }
    data_view->AllocateExternalPointerEntries(isolate());
    data_view->set_data_pointer(
        isolate(),
        reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
  } else if (InstanceTypeChecker::IsJSTypedArray(instance_type)) {
    Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(obj);
    // Fixup typed array pointers.
    if (typed_array->is_on_heap()) {
      Address raw_external_pointer = typed_array->external_pointer_raw();
      typed_array->AllocateExternalPointerEntries(isolate());
      typed_array->SetOnHeapDataPtr(
          isolate(), HeapObject::cast(typed_array->base_pointer()),
          raw_external_pointer);
    } else {
      // Serializer writes backing store ref as a DataPtr() value.
      uint32_t store_index =
          typed_array->GetExternalBackingStoreRefForDeserialization();
      auto backing_store = backing_stores_[store_index];
      auto start = backing_store
                       ? reinterpret_cast<byte*>(backing_store->buffer_start())
                       : nullptr;
      typed_array->AllocateExternalPointerEntries(isolate());
      typed_array->SetOffHeapDataPtr(isolate(), start,
                                     typed_array->byte_offset());
    }
  } else if (InstanceTypeChecker::IsJSArrayBuffer(instance_type)) {
    Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(obj);
    // Postpone allocation of backing store to avoid triggering the GC.
    if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
      new_off_heap_array_buffers_.push_back(buffer);
    } else {
      buffer->AllocateExternalPointerEntries(isolate());
      buffer->set_backing_store(isolate(), nullptr);
    }
  } else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
    // TODO(mythria): Remove these once we store the default values for these
    // fields in the serializer.
    Handle<BytecodeArray> bytecode_array = Handle<BytecodeArray>::cast(obj);
    bytecode_array->set_osr_loop_nesting_level(0);
  } else if (InstanceTypeChecker::IsDescriptorArray(instance_type)) {
    DCHECK(InstanceTypeChecker::IsStrongDescriptorArray(instance_type));
    Handle<DescriptorArray> descriptors = Handle<DescriptorArray>::cast(obj);
    new_descriptor_arrays_.push_back(descriptors);
  }

  // Check alignment.
  DCHECK_EQ(0, Heap::GetFillToAlign(obj->address(),
                                    HeapObject::RequiredAlignment(*map)));
}

HeapObjectReferenceType Deserializer::GetAndResetNextReferenceType() {
  HeapObjectReferenceType type = next_reference_is_weak_
                                     ? HeapObjectReferenceType::WEAK
                                     : HeapObjectReferenceType::STRONG;
  next_reference_is_weak_ = false;
  return type;
}

Handle<HeapObject> Deserializer::GetBackReferencedObject() {
  Handle<HeapObject> obj = back_refs_[source_.GetInt()];

  // We don't allow ThinStrings in backreferences -- if internalization produces
  // a thin string, then it should also update the backref handle.
  DCHECK(!obj->IsThinString());

  hot_objects_.Add(obj);
  DCHECK(!HasWeakHeapObjectTag(*obj));
  return obj;
}

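// Reads the single object described by the next bytecode into a fresh Handle,
// by dispatching that bytecode through a SlotAccessorForHandle.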
Handle<HeapObject> Deserializer::ReadObject() {
  Handle<HeapObject> ret;
  CHECK_EQ(ReadSingleBytecodeData(source_.Get(),
                                  SlotAccessorForHandle(&ret, isolate())),
           1);
  return ret;
}

Handle<HeapObject> Deserializer::ReadObject(SnapshotSpace space) {
  const int size_in_tagged = source_.GetInt();
  const int size_in_bytes = size_in_tagged * kTaggedSize;

  // The map can't be a forward ref. If you want the map to be a forward ref,
  // then you're probably serializing the meta-map, in which case you want to
  // use the kNewMetaMap bytecode.
  DCHECK_NE(source()->Peek(), kRegisterPendingForwardRef);
  Handle<Map> map = Handle<Map>::cast(ReadObject());

  // Filling an object's fields can cause GCs and heap walks, so this object has
  // to be in a 'sufficiently initialised' state by the time the next allocation
  // can happen. For this to be the case, the object is carefully deserialized
  // as follows:
  //   * The space for the object is allocated.
  //   * The map is set on the object so that the GC knows what type the object
  //     has.
  //   * The rest of the object is filled with a fixed Smi value
  //     - This is a Smi so that tagged fields become initialized to a valid
  //       tagged value.
  //     - It's a fixed value, "uninitialized_field_value", so that we can
  //       DCHECK for it when reading objects that are assumed to be partially
  //       initialized objects.
  //   * The fields of the object are deserialized in order, under the
  //     assumption that objects are laid out in such a way that any fields
  //     required for object iteration (e.g. length fields) are deserialized
  //     before fields with objects.
  //     - We ensure this is the case by DCHECKing on object allocation that the
  //       previously allocated object has a valid size (see `Allocate`).
  HeapObject raw_obj =
      Allocate(space, size_in_bytes, HeapObject::RequiredAlignment(*map));
  raw_obj.set_map_after_allocation(*map);
  MemsetTagged(raw_obj.RawField(kTaggedSize), uninitialized_field_value(),
               size_in_tagged - 1);

  // Make sure BytecodeArrays have a valid age, so that the marker doesn't
  // break when making them older.
  if (raw_obj.IsBytecodeArray(isolate())) {
    BytecodeArray::cast(raw_obj).set_bytecode_age(
        BytecodeArray::kFirstBytecodeAge);
  }

#ifdef DEBUG
  // We want to make sure that all embedder pointers are initialized to null.
  if (raw_obj.IsJSObject() && JSObject::cast(raw_obj).IsApiWrapper()) {
    JSObject js_obj = JSObject::cast(raw_obj);
    for (int i = 0; i < js_obj.GetEmbedderFieldCount(); ++i) {
      void* pointer;
      CHECK(EmbedderDataSlot(js_obj, i).ToAlignedPointerSafe(isolate(),
                                                             &pointer));
      CHECK_NULL(pointer);
    }
  } else if (raw_obj.IsEmbedderDataArray()) {
    EmbedderDataArray array = EmbedderDataArray::cast(raw_obj);
    EmbedderDataSlot start(array, 0);
    EmbedderDataSlot end(array, array.length());
    for (EmbedderDataSlot slot = start; slot < end; ++slot) {
      void* pointer;
      CHECK(slot.ToAlignedPointerSafe(isolate(), &pointer));
      CHECK_NULL(pointer);
    }
  }
#endif

  Handle<HeapObject> obj = handle(raw_obj, isolate());
  back_refs_.push_back(obj);

  ReadData(obj, 1, size_in_tagged);
  PostProcessNewObject(map, obj, space);

  DCHECK(!obj->IsThinString(isolate()));

#ifdef DEBUG
  if (obj->IsCode()) {
    DCHECK(space == SnapshotSpace::kCode ||
           space == SnapshotSpace::kReadOnlyHeap);
  } else {
    DCHECK_NE(space, SnapshotSpace::kCode);
  }
#endif  // DEBUG

  return obj;
}

Handle<HeapObject> Deserializer::ReadMetaMap() {
  const SnapshotSpace space = SnapshotSpace::kReadOnlyHeap;
  const int size_in_bytes = Map::kSize;
  const int size_in_tagged = size_in_bytes / kTaggedSize;

  HeapObject raw_obj = Allocate(space, size_in_bytes, kWordAligned);
  raw_obj.set_map_after_allocation(Map::unchecked_cast(raw_obj));
  MemsetTagged(raw_obj.RawField(kTaggedSize), uninitialized_field_value(),
               size_in_tagged - 1);

  Handle<HeapObject> obj = handle(raw_obj, isolate());
  back_refs_.push_back(obj);

  // Set the instance-type manually, to allow backrefs to read it.
  Map::unchecked_cast(*obj).set_instance_type(MAP_TYPE);

  ReadData(obj, 1, size_in_tagged);
  PostProcessNewObject(Handle<Map>::cast(obj), obj, space);

  return obj;
}

class Deserializer::RelocInfoVisitor {
 public:
  RelocInfoVisitor(Deserializer* deserializer,
                   const std::vector<Handle<HeapObject>>* objects)
      : deserializer_(deserializer), objects_(objects), current_object_(0) {}
  ~RelocInfoVisitor() { DCHECK_EQ(current_object_, objects_->size()); }

  void VisitCodeTarget(Code host, RelocInfo* rinfo);
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo);
  void VisitRuntimeEntry(Code host, RelocInfo* rinfo);
  void VisitExternalReference(Code host, RelocInfo* rinfo);
  void VisitInternalReference(Code host, RelocInfo* rinfo);
  void VisitOffHeapTarget(Code host, RelocInfo* rinfo);

 private:
  Isolate* isolate() { return deserializer_->isolate(); }
  SnapshotByteSource& source() { return deserializer_->source_; }

  Deserializer* deserializer_;
  const std::vector<Handle<HeapObject>>* objects_;
  int current_object_;
};

void Deserializer::RelocInfoVisitor::VisitCodeTarget(Code host,
                                                     RelocInfo* rinfo) {
  HeapObject object = *objects_->at(current_object_++);
  rinfo->set_target_address(Code::cast(object).raw_instruction_start());
}

void Deserializer::RelocInfoVisitor::VisitEmbeddedPointer(Code host,
                                                          RelocInfo* rinfo) {
  HeapObject object = *objects_->at(current_object_++);
  // Embedded object reference must be a strong one.
  rinfo->set_target_object(isolate()->heap(), object);
}

void Deserializer::RelocInfoVisitor::VisitRuntimeEntry(Code host,
                                                       RelocInfo* rinfo) {
  // We no longer serialize code that contains runtime entries.
  UNREACHABLE();
}

void Deserializer::RelocInfoVisitor::VisitExternalReference(Code host,
                                                            RelocInfo* rinfo) {
  byte data = source().Get();
  CHECK_EQ(data, kExternalReference);

  Address address = deserializer_->ReadExternalReferenceCase();

  if (rinfo->IsCodedSpecially()) {
    Address location_of_branch_data = rinfo->pc();
    Assembler::deserialization_set_special_target_at(location_of_branch_data,
                                                     host, address);
  } else {
    WriteUnalignedValue(rinfo->target_address_address(), address);
  }
}

void Deserializer::RelocInfoVisitor::VisitInternalReference(Code host,
                                                            RelocInfo* rinfo) {
  byte data = source().Get();
  CHECK_EQ(data, kInternalReference);

  // Internal reference target is encoded as an offset from code entry.
  int target_offset = source().GetInt();
  // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
  // consider using raw_instruction_size() instead of raw_body_size() in the
  // future.
  STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
  DCHECK_LT(static_cast<unsigned>(target_offset),
            static_cast<unsigned>(host.raw_body_size()));
  Address target = host.entry() + target_offset;
  Assembler::deserialization_set_target_internal_reference_at(
      rinfo->pc(), target, rinfo->rmode());
}

void Deserializer::RelocInfoVisitor::VisitOffHeapTarget(Code host,
                                                        RelocInfo* rinfo) {
  byte data = source().Get();
  CHECK_EQ(data, kOffHeapTarget);

  int builtin_index = source().GetInt();
  DCHECK(Builtins::IsBuiltinId(builtin_index));

  CHECK_NOT_NULL(isolate()->embedded_blob_code());
  EmbeddedData d = EmbeddedData::FromBlob();
  Address address = d.InstructionStartOfBuiltin(builtin_index);
  CHECK_NE(kNullAddress, address);

  // TODO(ishell): implement RelocInfo::set_target_off_heap_target()
  if (RelocInfo::OffHeapTargetIsCodedSpecially()) {
    Address location_of_branch_data = rinfo->pc();
    Assembler::deserialization_set_special_target_at(location_of_branch_data,
                                                     host, address);
  } else {
    WriteUnalignedValue(rinfo->target_address_address(), address);
  }
}

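// Writes the same deserialized object into |repeat_count| consecutive slots;
// used by the kFixedRepeat and kVariableRepeat bytecodes below.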
template <typename SlotAccessor>
int Deserializer::ReadRepeatedObject(SlotAccessor slot_accessor,
                                     int repeat_count) {
  CHECK_LE(2, repeat_count);

  Handle<HeapObject> heap_object = ReadObject();
  DCHECK(!Heap::InYoungGeneration(*heap_object));
  for (int i = 0; i < repeat_count; i++) {
    // TODO(leszeks): Use a ranged barrier here.
    slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG, i);
  }
  return repeat_count;
}

namespace {

void NoExternalReferencesCallback() {
  // The following check will trigger if a function or object template
  // with references to native functions has been deserialized from
  // snapshot, but no actual external references were provided when the
  // isolate was created.
  FATAL("No external references provided via API");
}

// Template used by the below CASE_RANGE macro to statically verify that the
// given number of cases matches the number of expected cases for that bytecode.
template <int byte_code_count, int expected>
constexpr byte VerifyBytecodeCount(byte bytecode) {
  STATIC_ASSERT(byte_code_count == expected);
  return bytecode;
}

}  // namespace

// Helper macro (and its implementation detail) for specifying a range of cases.
// Use as "case CASE_RANGE(byte_code, num_bytecodes):"
#define CASE_RANGE(byte_code, num_bytecodes) \
  CASE_R##num_bytecodes(                     \
      (VerifyBytecodeCount<byte_code##Count, num_bytecodes>(byte_code)))
#define CASE_R1(byte_code) byte_code
#define CASE_R2(byte_code) CASE_R1(byte_code) : case CASE_R1(byte_code + 1)
#define CASE_R3(byte_code) CASE_R2(byte_code) : case CASE_R1(byte_code + 2)
#define CASE_R4(byte_code) CASE_R2(byte_code) : case CASE_R2(byte_code + 2)
#define CASE_R8(byte_code) CASE_R4(byte_code) : case CASE_R4(byte_code + 4)
#define CASE_R16(byte_code) CASE_R8(byte_code) : case CASE_R8(byte_code + 8)
#define CASE_R32(byte_code) CASE_R16(byte_code) : case CASE_R16(byte_code + 16)
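// For example, "case CASE_RANGE(kHotObject, 8):" statically checks that
// kHotObjectCount == 8 and then expands to the eight consecutive case labels
// kHotObject, kHotObject + 1, ..., kHotObject + 7.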

// This generates a case range for all the spaces.
#define CASE_RANGE_ALL_SPACES(bytecode)                           \
  SpaceEncoder<bytecode>::Encode(SnapshotSpace::kOld)             \
      : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kCode) \
      : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kMap)  \
      : case SpaceEncoder<bytecode>::Encode(SnapshotSpace::kReadOnlyHeap)

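// Fills the slots [start_slot_index, end_slot_index) of |object| by repeatedly
// reading a bytecode and dispatching it; each bytecode handler returns the
// number of slots it wrote, which advances the current slot index.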
void Deserializer::ReadData(Handle<HeapObject> object, int start_slot_index,
                            int end_slot_index) {
  int current = start_slot_index;
  while (current < end_slot_index) {
    byte data = source_.Get();
    current += ReadSingleBytecodeData(
        data, SlotAccessorForHeapObject::ForSlotIndex(object, current));
  }
  CHECK_EQ(current, end_slot_index);
}

void Deserializer::ReadData(FullMaybeObjectSlot start,
                            FullMaybeObjectSlot end) {
  FullMaybeObjectSlot current = start;
  while (current < end) {
    byte data = source_.Get();
    current += ReadSingleBytecodeData(data, SlotAccessorForRootSlots(current));
  }
  CHECK_EQ(current, end);
}

template <typename SlotAccessor>
int Deserializer::ReadSingleBytecodeData(byte data,
                                         SlotAccessor slot_accessor) {
  using TSlot = decltype(slot_accessor.slot());

  switch (data) {
    // Deserialize a new object and write a pointer to it to the current
    // object.
    case CASE_RANGE_ALL_SPACES(kNewObject): {
      SnapshotSpace space = NewObject::Decode(data);
      // Save the reference type before recursing down into reading the object.
      HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
      Handle<HeapObject> heap_object = ReadObject(space);
      return slot_accessor.Write(heap_object, ref_type);
    }

    // Find a recently deserialized object using its offset from the current
    // allocation point and write a pointer to it to the current object.
    case kBackref: {
      Handle<HeapObject> heap_object = GetBackReferencedObject();
      return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
    }

    // Reference an object in the read-only heap. This should be used when an
    // object is read-only, but is not a root.
    case kReadOnlyHeapRef: {
      DCHECK(isolate()->heap()->deserialization_complete());
      uint32_t chunk_index = source_.GetInt();
      uint32_t chunk_offset = source_.GetInt();

      ReadOnlySpace* read_only_space = isolate()->heap()->read_only_space();
      ReadOnlyPage* page = read_only_space->pages()[chunk_index];
      Address address = page->OffsetToAddress(chunk_offset);
      HeapObject heap_object = HeapObject::FromAddress(address);

      return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
    }

    // Find an object in the roots array and write a pointer to it to the
    // current object.
    case kRootArray: {
      int id = source_.GetInt();
      RootIndex root_index = static_cast<RootIndex>(id);
      Handle<HeapObject> heap_object =
          Handle<HeapObject>::cast(isolate()->root_handle(root_index));
      hot_objects_.Add(heap_object);
      return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
    }

    // Find an object in the startup object cache and write a pointer to it to
    // the current object.
    case kStartupObjectCache: {
      int cache_index = source_.GetInt();
      // TODO(leszeks): Could we use the address of the startup_object_cache
      // entry as a Handle backing?
      HeapObject heap_object =
          HeapObject::cast(isolate()->startup_object_cache()->at(cache_index));
      return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
    }

    // Find an object in the read-only object cache and write a pointer to it
    // to the current object.
    case kReadOnlyObjectCache: {
      int cache_index = source_.GetInt();
      // TODO(leszeks): Could we use the address of the cached_read_only_object
      // entry as a Handle backing?
      HeapObject heap_object = HeapObject::cast(
          isolate()->read_only_heap()->cached_read_only_object(cache_index));
      return slot_accessor.Write(heap_object, GetAndResetNextReferenceType());
    }

    // Deserialize a new meta-map and write a pointer to it to the current
    // object.
    case kNewMetaMap: {
      Handle<HeapObject> heap_object = ReadMetaMap();
      return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
    }

    // Find an external reference and write a pointer to it to the current
    // object.
    case kSandboxedExternalReference:
    case kExternalReference: {
      Address address = ReadExternalReferenceCase();
      if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedExternalReference) {
        return WriteExternalPointer(slot_accessor.slot(), address,
                                    kForeignForeignAddressTag);
      } else {
        DCHECK(!V8_HEAP_SANDBOX_BOOL);
        return WriteAddress(slot_accessor.slot(), address);
      }
    }

    case kInternalReference:
    case kOffHeapTarget:
      // These bytecodes are expected only during RelocInfo iteration.
      UNREACHABLE();

    // Find an object in the attached references and write a pointer to it to
    // the current object.
    case kAttachedReference: {
      int index = source_.GetInt();
      Handle<HeapObject> heap_object = attached_objects_[index];

      // This is the only case where we might encounter new space objects, so
      // maybe emit a generational write barrier.
      return slot_accessor.WriteWithGenerationalBarrier(
          heap_object, GetAndResetNextReferenceType());
    }

    case kNop:
      return 0;

    case kRegisterPendingForwardRef: {
      HeapObjectReferenceType ref_type = GetAndResetNextReferenceType();
      unresolved_forward_refs_.emplace_back(slot_accessor.object(),
                                            slot_accessor.offset(), ref_type);
      num_unresolved_forward_refs_++;
      return 1;
    }

    case kResolvePendingForwardRef: {
      // Pending forward refs can only be resolved after the heap object's map
      // field is deserialized; currently they only appear immediately after
      // the map field.
      DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);
      Handle<HeapObject> obj = slot_accessor.object();
      int index = source_.GetInt();
      auto& forward_ref = unresolved_forward_refs_[index];
      SlotAccessorForHeapObject::ForSlotOffset(forward_ref.object,
                                               forward_ref.offset)
          .Write(*obj, forward_ref.ref_type);
      num_unresolved_forward_refs_--;
      if (num_unresolved_forward_refs_ == 0) {
        // If there are no more pending fields, clear the entire pending field
        // vector.
        unresolved_forward_refs_.clear();
      } else {
        // Otherwise, at least clear the pending field.
        forward_ref.object = Handle<HeapObject>();
      }
      return 0;
    }

    case kSynchronize:
      // If we get here then that indicates that you have a mismatch between
      // the number of GC roots when serializing and deserializing.
      UNREACHABLE();

    // Deserialize raw data of variable length.
    case kVariableRawData: {
      // This operation is only supported for tagged-size slots, else we might
      // become misaligned.
      DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
      int size_in_tagged = source_.GetInt();
      // TODO(leszeks): Only copy slots when there are Smis in the serialized
      // data.
      source_.CopySlots(slot_accessor.slot().location(), size_in_tagged);
      return size_in_tagged;
    }

    // Deserialize raw code directly into the body of the code object.
    case kCodeBody: {
      // This operation is only supported for tagged-size slots, else we might
      // become misaligned.
      DCHECK_EQ(TSlot::kSlotDataSize, kTaggedSize);
      // CodeBody can only occur right after the heap object header.
      DCHECK_EQ(slot_accessor.offset(), HeapObject::kHeaderSize);

      int size_in_tagged = source_.GetInt();
      int size_in_bytes = size_in_tagged * kTaggedSize;

      {
        DisallowGarbageCollection no_gc;
        Code code = Code::cast(*slot_accessor.object());

        // First deserialize the code itself.
        source_.CopyRaw(
            reinterpret_cast<void*>(code.address() + Code::kDataStart),
            size_in_bytes);
      }

      // Then deserialize the code header
      ReadData(slot_accessor.object(), HeapObject::kHeaderSize / kTaggedSize,
               Code::kDataStart / kTaggedSize);

      // Then deserialize the pre-serialized RelocInfo objects.
      std::vector<Handle<HeapObject>> preserialized_objects;
      while (source_.Peek() != kSynchronize) {
        Handle<HeapObject> obj = ReadObject();
        preserialized_objects.push_back(obj);
      }
      // Skip the synchronize bytecode.
      source_.Advance(1);

      // Finally iterate RelocInfos (the same way it was done by the serializer)
      // and deserialize respective data into RelocInfos. The RelocIterator
      // holds a raw pointer to the code, so we have to disable garbage
      // collection here. It's ok though, any objects it would have needed are
      // in the preserialized_objects vector.
      {
        DisallowGarbageCollection no_gc;

        Code code = Code::cast(*slot_accessor.object());
        RelocInfoVisitor visitor(this, &preserialized_objects);
        for (RelocIterator it(code, Code::BodyDescriptor::kRelocModeMask);
             !it.done(); it.next()) {
          it.rinfo()->Visit(&visitor);
        }
      }

      // Advance to the end of the code object.
      return (Code::kDataStart - HeapObject::kHeaderSize) / kTaggedSize +
             size_in_tagged;
    }

    case kVariableRepeat: {
      int repeats = VariableRepeatCount::Decode(source_.GetInt());
      return ReadRepeatedObject(slot_accessor, repeats);
    }

    case kOffHeapBackingStore: {
      AlwaysAllocateScope scope(isolate()->heap());
      int byte_length = source_.GetInt();
      std::unique_ptr<BackingStore> backing_store =
          BackingStore::Allocate(isolate(), byte_length, SharedFlag::kNotShared,
                                 InitializedFlag::kUninitialized);
      CHECK_NOT_NULL(backing_store);
      source_.CopyRaw(backing_store->buffer_start(), byte_length);
      backing_stores_.push_back(std::move(backing_store));
      return 0;
    }

    case kSandboxedApiReference:
    case kApiReference: {
      uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
      Address address;
      if (isolate()->api_external_references()) {
        DCHECK_WITH_MSG(reference_id < num_api_references_,
                        "too few external references provided through the API");
        address = static_cast<Address>(
            isolate()->api_external_references()[reference_id]);
      } else {
        address = reinterpret_cast<Address>(NoExternalReferencesCallback);
      }
      if (V8_HEAP_SANDBOX_BOOL && data == kSandboxedApiReference) {
        return WriteExternalPointer(slot_accessor.slot(), address,
                                    kForeignForeignAddressTag);
      } else {
        DCHECK(!V8_HEAP_SANDBOX_BOOL);
        return WriteAddress(slot_accessor.slot(), address);
      }
    }

    case kClearedWeakReference:
      return slot_accessor.Write(HeapObjectReference::ClearedValue(isolate()));

    case kWeakPrefix: {
      // We shouldn't have two weak prefixes in a row.
      DCHECK(!next_reference_is_weak_);
      // We shouldn't have weak refs without a current object.
      DCHECK_NE(slot_accessor.object()->address(), kNullAddress);
      next_reference_is_weak_ = true;
      return 0;
    }

    case CASE_RANGE(kRootArrayConstants, 32): {
      // First kRootArrayConstantsCount roots are guaranteed to be in
      // the old space.
      STATIC_ASSERT(static_cast<int>(RootIndex::kFirstImmortalImmovableRoot) ==
                    0);
      STATIC_ASSERT(kRootArrayConstantsCount <=
                    static_cast<int>(RootIndex::kLastImmortalImmovableRoot));

      RootIndex root_index = RootArrayConstant::Decode(data);
      Handle<HeapObject> heap_object =
          Handle<HeapObject>::cast(isolate()->root_handle(root_index));
      return slot_accessor.Write(heap_object, HeapObjectReferenceType::STRONG);
    }

    case CASE_RANGE(kHotObject, 8): {
      int index = HotObject::Decode(data);
      Handle<HeapObject> hot_object = hot_objects_.Get(index);
      return slot_accessor.Write(hot_object, GetAndResetNextReferenceType());
    }

    case CASE_RANGE(kFixedRawData, 32): {
      // Deserialize raw data of fixed length from 1 to 32 times kTaggedSize.
      int size_in_tagged = FixedRawDataWithSize::Decode(data);
      STATIC_ASSERT(TSlot::kSlotDataSize == kTaggedSize ||
                    TSlot::kSlotDataSize == 2 * kTaggedSize);
      int size_in_slots = size_in_tagged / (TSlot::kSlotDataSize / kTaggedSize);
      // kFixedRawData can have kTaggedSize != TSlot::kSlotDataSize when
      // serializing Smi roots in pointer-compressed builds. In this case, the
      // size in bytes is unconditionally the (full) slot size.
      DCHECK_IMPLIES(kTaggedSize != TSlot::kSlotDataSize, size_in_slots == 1);
      // TODO(leszeks): Only copy slots when there are Smis in the serialized
      // data.
      source_.CopySlots(slot_accessor.slot().location(), size_in_slots);
      return size_in_slots;
    }

    case CASE_RANGE(kFixedRepeat, 16): {
      int repeats = FixedRepeatWithCount::Decode(data);
      return ReadRepeatedObject(slot_accessor, repeats);
    }

#ifdef DEBUG
#define UNUSED_CASE(byte_code) \
  case byte_code:              \
    UNREACHABLE();
      UNUSED_SERIALIZER_BYTE_CODES(UNUSED_CASE)
#endif
#undef UNUSED_CASE
  }

  // The above switch, including UNUSED_SERIALIZER_BYTE_CODES, covers all
  // possible bytecodes; but, clang doesn't realize this, so we have an explicit
  // UNREACHABLE here too.
  UNREACHABLE();
}

#undef CASE_RANGE_ALL_SPACES
#undef CASE_RANGE
#undef CASE_R32
#undef CASE_R16
#undef CASE_R8
#undef CASE_R4
#undef CASE_R3
#undef CASE_R2
#undef CASE_R1

Address Deserializer::ReadExternalReferenceCase() {
  uint32_t reference_id = static_cast<uint32_t>(source_.GetInt());
  return isolate()->external_reference_table()->address(reference_id);
}

namespace {
AllocationType SpaceToType(SnapshotSpace space) {
  switch (space) {
    case SnapshotSpace::kCode:
      return AllocationType::kCode;
    case SnapshotSpace::kMap:
      return AllocationType::kMap;
    case SnapshotSpace::kOld:
      return AllocationType::kOld;
    case SnapshotSpace::kReadOnlyHeap:
      return AllocationType::kReadOnly;
  }
}
}  // namespace

HeapObject Deserializer::Allocate(SnapshotSpace space, int size,
                                  AllocationAlignment alignment) {
#ifdef DEBUG
  if (!previous_allocation_obj_.is_null()) {
    // Make sure that the previous object is initialized sufficiently to
    // be iterated over by the GC.
    int object_size = previous_allocation_obj_->Size();
    DCHECK_LE(object_size, previous_allocation_size_);
  }
#endif

  HeapObject obj = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
      size, SpaceToType(space), AllocationOrigin::kRuntime, alignment);

#ifdef DEBUG
  previous_allocation_obj_ = handle(obj, isolate());
  previous_allocation_size_ = size;
#endif

  return obj;
}

}  // namespace internal
}  // namespace v8