// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/serializer.h"

#include "src/codegen/assembler-inl.h"
#include "src/common/globals.h"
#include "src/heap/heap-inl.h"  // For Space::identity().
#include "src/heap/memory-chunk-inl.h"
#include "src/heap/read-only-heap.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/code.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/objects/map.h"
#include "src/objects/objects-body-descriptors-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/snapshot/serializer-deserializer.h"

namespace v8 {
namespace internal {

Serializer::Serializer(Isolate* isolate, Snapshot::SerializerFlags flags)
    : isolate_(isolate),
      hot_objects_(isolate->heap()),
      reference_map_(isolate),
      external_reference_encoder_(isolate),
      root_index_map_(isolate),
      deferred_objects_(isolate->heap()),
      forward_refs_per_pending_object_(isolate->heap()),
      flags_(flags)
#ifdef DEBUG
      ,
      back_refs_(isolate->heap()),
      stack_(isolate->heap())
#endif
{
#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) {
      // Value-initialized to 0.
      instance_type_count_[space] = std::make_unique<int[]>(kInstanceTypes);
      instance_type_size_[space] = std::make_unique<size_t[]>(kInstanceTypes);
    }
  }
#endif  // OBJECT_PRINT
}

void Serializer::CountAllocation(Map map, int size, SnapshotSpace space) {
  DCHECK(FLAG_serialization_statistics);

  const int space_number = static_cast<int>(space);
  allocation_size_[space_number] += size;
#ifdef OBJECT_PRINT
  int instance_type = map.instance_type();
  instance_type_count_[space_number][instance_type]++;
  instance_type_size_[space_number][instance_type] += size;
#endif  // OBJECT_PRINT
}

int Serializer::TotalAllocationSize() const {
  int sum = 0;
  for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
    sum += allocation_size_[space];
  }
  return sum;
}

void Serializer::OutputStatistics(const char* name) {
  if (!FLAG_serialization_statistics) return;

  PrintF("%s:\n", name);

  PrintF("  Spaces (bytes):\n");

  for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
    PrintF("%16s",
           BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");

  for (int space = 0; space < kNumberOfSnapshotSpaces; space++) {
    PrintF("%16zu", allocation_size_[space]);
  }

#ifdef OBJECT_PRINT
  PrintF("  Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                          \
  for (int space = 0; space < kNumberOfSnapshotSpaces; ++space) {          \
    if (instance_type_count_[space][Name]) {                               \
      PrintF("%10d %10zu  %-10s %s\n", instance_type_count_[space][Name],  \
             instance_type_size_[space][Name],                             \
             BaseSpace::GetSpaceName(static_cast<AllocationSpace>(space)), \
             #Name);                                                       \
    }                                                                      \
  }
  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE

  PrintF("\n");
#endif  // OBJECT_PRINT
}

void Serializer::SerializeDeferredObjects() {
  if (FLAG_trace_serializer) {
    PrintF("Serializing deferred objects\n");
  }
  WHILE_WITH_HANDLE_SCOPE(isolate(), !deferred_objects_.empty(), {
    Handle<HeapObject> obj = handle(deferred_objects_.Pop(), isolate());

    ObjectSerializer obj_serializer(this, obj, &sink_);
    obj_serializer.SerializeDeferred();
  });
  sink_.Put(kSynchronize, "Finished with deferred objects");
}

void Serializer::SerializeObject(Handle<HeapObject> obj) {
  // ThinStrings are just an indirection to an internalized string, so elide the
  // indirection and serialize the actual string directly.
  if (obj->IsThinString(isolate())) {
    obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
  }
  SerializeObjectImpl(obj);
}

bool Serializer::MustBeDeferred(HeapObject object) { return false; }

void Serializer::VisitRootPointers(Root root, const char* description,
                                   FullObjectSlot start, FullObjectSlot end) {
  for (FullObjectSlot current = start; current < end; ++current) {
    SerializeRootObject(current);
  }
}

void Serializer::SerializeRootObject(FullObjectSlot slot) {
  Object o = *slot;
  if (o.IsSmi()) {
    PutSmiRoot(slot);
  } else {
    SerializeObject(Handle<HeapObject>(slot.location()));
  }
}

#ifdef DEBUG
void Serializer::PrintStack() { PrintStack(std::cout); }

void Serializer::PrintStack(std::ostream& out) {
  for (const auto o : stack_) {
    o->Print(out);
    out << "\n";
  }
}
#endif  // DEBUG

bool Serializer::SerializeRoot(Handle<HeapObject> obj) {
  RootIndex root_index;
  // Derived serializers are responsible for determining if the root has
  // actually been serialized before calling this.
  if (root_index_map()->Lookup(*obj, &root_index)) {
    PutRoot(root_index);
    return true;
  }
  return false;
}

bool Serializer::SerializeHotObject(Handle<HeapObject> obj) {
  // Encode a reference to a hot object by its index in the working set.
  int index = hot_objects_.Find(*obj);
  if (index == HotObjectsList::kNotFound) return false;
  DCHECK(index >= 0 && index < kHotObjectCount);
  if (FLAG_trace_serializer) {
    PrintF(" Encoding hot object %d:", index);
    obj->ShortPrint();
    PrintF("\n");
  }
  sink_.Put(HotObject::Encode(index), "HotObject");
  return true;
}

bool Serializer::SerializeBackReference(Handle<HeapObject> obj) {
  const SerializerReference* reference = reference_map_.LookupReference(obj);
  if (reference == nullptr) return false;
  // Encode the location of an already deserialized object in order to write
  // its location into a later object.  We can encode the location as an
  // offset from the start of the deserialized objects or as an offset
  // backwards from the current allocation pointer.
  if (reference->is_attached_reference()) {
    if (FLAG_trace_serializer) {
      PrintF(" Encoding attached reference %d\n",
             reference->attached_reference_index());
    }
    PutAttachedReference(*reference);
  } else {
    DCHECK(reference->is_back_reference());
    if (FLAG_trace_serializer) {
      PrintF(" Encoding back reference to: ");
      obj->ShortPrint();
      PrintF("\n");
    }

    sink_.Put(kBackref, "Backref");
    PutBackReference(obj, *reference);
  }
  return true;
}

bool Serializer::SerializePendingObject(Handle<HeapObject> obj) {
  PendingObjectReferences* refs_to_object =
      forward_refs_per_pending_object_.Find(obj);
  if (refs_to_object == nullptr) {
    return false;
  }

  PutPendingForwardReference(*refs_to_object);
  return true;
}

bool Serializer::ObjectIsBytecodeHandler(Handle<HeapObject> obj) const {
  if (!obj->IsCode()) return false;
  return (Code::cast(*obj).kind() == CodeKind::BYTECODE_HANDLER);
}

void Serializer::PutRoot(RootIndex root) {
  int root_index = static_cast<int>(root);
  Handle<HeapObject> object =
      Handle<HeapObject>::cast(isolate()->root_handle(root));
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

  // Assert that the first 32 root array items are a conscious choice. They are
  // chosen so that the most common ones can be encoded more efficiently.
  STATIC_ASSERT(static_cast<int>(RootIndex::kArgumentsMarker) ==
                kRootArrayConstantsCount - 1);

  // TODO(ulan): Check that it works with young large objects.
  if (root_index < kRootArrayConstantsCount &&
      !Heap::InYoungGeneration(*object)) {
    sink_.Put(RootArrayConstant::Encode(root), "RootConstant");
  } else {
    sink_.Put(kRootArray, "RootSerialization");
    sink_.PutInt(root_index, "root_index");
    hot_objects_.Add(*object);
  }
}
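
// Illustrative cost model for PutRoot above: a frequently used root such as
// the undefined value (index below kRootArrayConstantsCount and not in the
// young generation) costs a single RootArrayConstant bytecode, while any
// other root costs a kRootArray bytecode plus a varint index and is also
// added to the hot-object working set.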

void Serializer::PutSmiRoot(FullObjectSlot slot) {
  // Serializing a smi root in compressed pointer builds will serialize the
  // full object slot (of kSystemPointerSize) to avoid complications during
  // deserialization (endianness or smi sequences).
  STATIC_ASSERT(decltype(slot)::kSlotDataSize == sizeof(Address));
  STATIC_ASSERT(decltype(slot)::kSlotDataSize == kSystemPointerSize);
  static constexpr int bytes_to_output = decltype(slot)::kSlotDataSize;
  static constexpr int size_in_tagged = bytes_to_output >> kTaggedSizeLog2;
  sink_.Put(FixedRawDataWithSize::Encode(size_in_tagged), "Smi");

  Address raw_value = Smi::cast(*slot).ptr();
  const byte* raw_value_as_bytes = reinterpret_cast<const byte*>(&raw_value);
  sink_.PutRaw(raw_value_as_bytes, bytes_to_output, "Bytes");
}
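
// Worked example for PutSmiRoot above: on a 64-bit build with pointer
// compression, kSystemPointerSize is 8 and kTaggedSizeLog2 is 2, so the smi
// is emitted as FixedRawDataWithSize::Encode(2) followed by 8 raw bytes;
// without pointer compression, size_in_tagged is 1.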

void Serializer::PutBackReference(Handle<HeapObject> object,
                                  SerializerReference reference) {
  DCHECK_EQ(*object, *back_refs_[reference.back_ref_index()]);
  sink_.PutInt(reference.back_ref_index(), "BackRefIndex");
  hot_objects_.Add(*object);
}

void Serializer::PutAttachedReference(SerializerReference reference) {
  DCHECK(reference.is_attached_reference());
  sink_.Put(kAttachedReference, "AttachedRef");
  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}

void Serializer::PutRepeat(int repeat_count) {
  if (repeat_count <= kLastEncodableFixedRepeatCount) {
    sink_.Put(FixedRepeatWithCount::Encode(repeat_count), "FixedRepeat");
  } else {
    sink_.Put(kVariableRepeat, "VariableRepeat");
    sink_.PutInt(VariableRepeatCount::Encode(repeat_count), "repeat count");
  }
}
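
// Illustrative example for PutRepeat above: a run of, say, four consecutive
// slots holding the same immortal immovable root (see VisitPointers below)
// is emitted as one FixedRepeatWithCount bytecode plus a single object
// reference rather than four separate references; counts above
// kLastEncodableFixedRepeatCount fall back to kVariableRepeat with a varint
// count.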

void Serializer::PutPendingForwardReference(PendingObjectReferences& refs) {
  sink_.Put(kRegisterPendingForwardRef, "RegisterPendingForwardRef");
  unresolved_forward_refs_++;
  // Register the current slot with the pending object.
  int forward_ref_id = next_forward_ref_id_++;
  if (refs == nullptr) {
    // The IdentityMap holding the pending object reference vectors does not
    // support non-trivial types; in particular it doesn't support destructors
    // on values. So, we manually allocate a vector with new, and delete it when
    // resolving the pending object.
    refs = new std::vector<int>();
  }
  refs->push_back(forward_ref_id);
}

void Serializer::ResolvePendingForwardReference(int forward_reference_id) {
  sink_.Put(kResolvePendingForwardRef, "ResolvePendingForwardRef");
  sink_.PutInt(forward_reference_id, "with this index");
  unresolved_forward_refs_--;

  // If there are no more unresolved forward refs, reset the forward ref id to
  // zero so that future forward refs compress better.
  if (unresolved_forward_refs_ == 0) {
    next_forward_ref_id_ = 0;
  }
}
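
// Illustrative sketch of the byte stream produced by the two functions
// above, assuming object A holds a pointer to a not-yet-allocated pending
// object B:
//
//   kRegisterPendingForwardRef       <- at A's slot; implicitly gets id 0
//   ... rest of A ...
//   NewObject(...), <B's map>        <- B's prologue allocates B
//   kResolvePendingForwardRef, 0     <- patch A's slot with B's address
//   ... rest of B ...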

void Serializer::RegisterObjectIsPending(Handle<HeapObject> obj) {
  if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) return;

  // Add the given object to the pending objects -> forward refs map.
  auto find_result = forward_refs_per_pending_object_.FindOrInsert(obj);
  USE(find_result);

  // If the above emplace didn't actually add the object, then the object must
  // already have been registered pending by deferring. It might not be in the
  // deferred objects queue though, since it may be the very object we just
  // popped off that queue, so just check that it can be deferred.
  DCHECK_IMPLIES(find_result.already_exists, *find_result.entry != nullptr);
  DCHECK_IMPLIES(find_result.already_exists, CanBeDeferred(*obj));
}

void Serializer::ResolvePendingObject(Handle<HeapObject> obj) {
  if (*obj == ReadOnlyRoots(isolate()).not_mapped_symbol()) return;

  std::vector<int>* refs;
  CHECK(forward_refs_per_pending_object_.Delete(obj, &refs));
  if (refs) {
    for (int index : *refs) {
      ResolvePendingForwardReference(index);
    }
    // See PutPendingForwardReference -- we have to manually manage the memory
    // of non-trivial IdentityMap values.
    delete refs;
  }
}

void Serializer::Pad(int padding_offset) {
  // The non-branching GetInt will read up to 3 bytes too far, so we need
  // to pad the snapshot to make sure we don't read over the end.
  for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
    sink_.Put(kNop, "Padding");
  }
  // Pad up to pointer size for checksum.
  while (!IsAligned(sink_.Position() + padding_offset, kPointerAlignment)) {
    sink_.Put(kNop, "Padding");
  }
}
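
// Worked example for Pad above, assuming 8-byte pointer alignment and
// padding_offset == 0: with the sink at position 13, the first loop emits 3
// kNops (covering GetInt's possible 3-byte over-read) to reach position 16,
// which is already pointer-aligned, so the second loop emits nothing.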

void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = std::make_unique<CodeAddressMap>(isolate_);
}

Code Serializer::CopyCode(Code code) {
  code_buffer_.clear();  // Clear buffer without deleting backing store.
  int size = code.CodeSize();
  code_buffer_.insert(code_buffer_.end(),
                      reinterpret_cast<byte*>(code.address()),
                      reinterpret_cast<byte*>(code.address() + size));
  // When pointer compression is enabled, the checked cast would try to
  // decompress the map field of the off-heap Code object.
  return Code::unchecked_cast(HeapObject::FromAddress(
      reinterpret_cast<Address>(&code_buffer_.front())));
}

void Serializer::ObjectSerializer::SerializePrologue(SnapshotSpace space,
                                                     int size, Map map) {
  if (serializer_->code_address_map_) {
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
  }

  if (map == *object_) {
    DCHECK_EQ(*object_, ReadOnlyRoots(isolate()).meta_map());
    DCHECK_EQ(space, SnapshotSpace::kReadOnlyHeap);
    sink_->Put(kNewMetaMap, "NewMetaMap");

    DCHECK_EQ(size, Map::kSize);
  } else {
    sink_->Put(NewObject::Encode(space), "NewObject");

    // TODO(leszeks): Skip this when the map has a fixed size.
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");

    // Until the space for the object is allocated, it is considered "pending".
    serializer_->RegisterObjectIsPending(object_);

    // Serialize map (first word of the object) before anything else, so that
    // the deserializer can access it when allocating. Make sure that the map
    // isn't a pending object.
    DCHECK_NULL(serializer_->forward_refs_per_pending_object_.Find(map));
    DCHECK(map.IsMap());
    serializer_->SerializeObject(handle(map, isolate()));

    // Make sure the map serialization didn't accidentally recursively serialize
    // this object.
    DCHECK_IMPLIES(
        *object_ != ReadOnlyRoots(isolate()).not_mapped_symbol(),
        serializer_->reference_map()->LookupReference(object_) == nullptr);

    // Now that the object is allocated, we can resolve pending references to
    // it.
    serializer_->ResolvePendingObject(object_);
  }

  if (FLAG_serialization_statistics) {
    serializer_->CountAllocation(object_->map(), size, space);
  }

  // Mark this object as already serialized, and add it to the reference map so
  // that it can be accessed by backreference by future objects.
  serializer_->num_back_refs_++;
#ifdef DEBUG
  serializer_->back_refs_.Push(*object_);
  DCHECK_EQ(serializer_->back_refs_.size(), serializer_->num_back_refs_);
#endif
  if (*object_ != ReadOnlyRoots(isolate()).not_mapped_symbol()) {
    // Only add the object to the map if it's not not_mapped_symbol, else
    // the reference IdentityMap has issues. We don't expect to have back
    // references to the not_mapped_symbol anyway, so it's fine.
    SerializerReference back_reference =
        SerializerReference::BackReference(serializer_->num_back_refs_ - 1);
    serializer_->reference_map()->Add(*object_, back_reference);
    DCHECK_EQ(*object_,
              *serializer_->back_refs_[back_reference.back_ref_index()]);
    DCHECK_EQ(back_reference.back_ref_index(), serializer_->reference_map()
                                                   ->LookupReference(object_)
                                                   ->back_ref_index());
  }
}
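
// Illustrative prologue stream emitted above for an ordinary (non-meta-map)
// object:
//
//   NewObject(space), <size in words>, <serialized map>
//
// Once the map has been serialized the object counts as allocated, so any
// pending forward references to it are resolved before its body follows.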

uint32_t Serializer::ObjectSerializer::SerializeBackingStore(
    void* backing_store, int32_t byte_length) {
  const SerializerReference* reference_ptr =
      serializer_->reference_map()->LookupBackingStore(backing_store);

  // Serialize the off-heap backing store.
  if (!reference_ptr) {
    sink_->Put(kOffHeapBackingStore, "Off-heap backing store");
    sink_->PutInt(byte_length, "length");
    sink_->PutRaw(static_cast<byte*>(backing_store), byte_length,
                  "BackingStore");
    DCHECK_NE(0, serializer_->seen_backing_stores_index_);
    SerializerReference reference =
        SerializerReference::OffHeapBackingStoreReference(
            serializer_->seen_backing_stores_index_++);
    // Mark this backing store as already serialized.
    serializer_->reference_map()->AddBackingStore(backing_store, reference);
    return reference.off_heap_backing_store_index();
  } else {
    return reference_ptr->off_heap_backing_store_index();
  }
}

void Serializer::ObjectSerializer::SerializeJSTypedArray() {
  Handle<JSTypedArray> typed_array = Handle<JSTypedArray>::cast(object_);
  if (typed_array->is_on_heap()) {
    typed_array->RemoveExternalPointerCompensationForSerialization(isolate());
  } else {
    if (!typed_array->WasDetached()) {
      // Explicitly serialize the backing store now.
      JSArrayBuffer buffer = JSArrayBuffer::cast(typed_array->buffer());
      // We cannot store byte_length larger than int32 range in the snapshot.
      CHECK_LE(buffer.byte_length(), std::numeric_limits<int32_t>::max());
      int32_t byte_length = static_cast<int32_t>(buffer.byte_length());
      size_t byte_offset = typed_array->byte_offset();

      // We need to calculate the backing store from the data pointer
      // because the ArrayBuffer may already have been serialized.
      void* backing_store = reinterpret_cast<void*>(
          reinterpret_cast<Address>(typed_array->DataPtr()) - byte_offset);

      uint32_t ref = SerializeBackingStore(backing_store, byte_length);
      typed_array->SetExternalBackingStoreRefForSerialization(ref);
    } else {
      typed_array->SetExternalBackingStoreRefForSerialization(0);
    }
  }
  SerializeObject();
}

void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
  Handle<JSArrayBuffer> buffer = Handle<JSArrayBuffer>::cast(object_);
  void* backing_store = buffer->backing_store();
  // We cannot store byte_length larger than int32 range in the snapshot.
  CHECK_LE(buffer->byte_length(), std::numeric_limits<int32_t>::max());
  int32_t byte_length = static_cast<int32_t>(buffer->byte_length());
  ArrayBufferExtension* extension = buffer->extension();

  // The embedder-allocated backing store only exists for the off-heap case.
#ifdef V8_HEAP_SANDBOX
  uint32_t external_pointer_entry =
      buffer->GetBackingStoreRefForDeserialization();
#endif
  if (backing_store != nullptr) {
    uint32_t ref = SerializeBackingStore(backing_store, byte_length);
    buffer->SetBackingStoreRefForSerialization(ref);

    // Ensure deterministic output by setting extension to null during
    // serialization.
    buffer->set_extension(nullptr);
  } else {
    buffer->SetBackingStoreRefForSerialization(kNullRefSentinel);
  }

  SerializeObject();

#ifdef V8_HEAP_SANDBOX
  buffer->SetBackingStoreRefForSerialization(external_pointer_entry);
#else
  buffer->set_backing_store(isolate(), backing_store);
#endif
  buffer->set_extension(extension);
}

void Serializer::ObjectSerializer::SerializeExternalString() {
  // For external strings with known resources, we replace the resource field
  // with the encoded external reference, which we restore upon
  // deserialization. For the rest we serialize them to look like ordinary
  // sequential strings.
  Handle<ExternalString> string = Handle<ExternalString>::cast(object_);
  Address resource = string->resource_as_address();
  ExternalReferenceEncoder::Value reference;
  if (serializer_->external_reference_encoder_.TryEncode(resource).To(
          &reference)) {
    DCHECK(reference.is_from_api());
#ifdef V8_HEAP_SANDBOX
    uint32_t external_pointer_entry =
        string->GetResourceRefForDeserialization();
#endif
    string->SetResourceRefForSerialization(reference.index());
    SerializeObject();
#ifdef V8_HEAP_SANDBOX
    string->SetResourceRefForSerialization(external_pointer_entry);
#else
    string->set_address_as_resource(isolate(), resource);
#endif
  } else {
    SerializeExternalStringAsSequentialString();
  }
}

void Serializer::ObjectSerializer::SerializeExternalStringAsSequentialString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  ReadOnlyRoots roots(isolate());
  DCHECK(object_->IsExternalString());
  Handle<ExternalString> string = Handle<ExternalString>::cast(object_);
  int length = string->length();
  Map map;
  int content_size;
  int allocation_size;
  const byte* resource;
  // Find the map and size for the imaginary sequential string.
  bool internalized = object_->IsInternalizedString();
  if (object_->IsExternalOneByteString()) {
    map = internalized ? roots.one_byte_internalized_string_map()
                       : roots.one_byte_string_map();
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        Handle<ExternalOneByteString>::cast(string)->resource()->data());
  } else {
    map = internalized ? roots.internalized_string_map() : roots.string_map();
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
        Handle<ExternalTwoByteString>::cast(string)->resource()->data());
  }

  SnapshotSpace space = SnapshotSpace::kOld;
  SerializePrologue(space, allocation_size, map);

  // Output the rest of the imaginary string.
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;
  DCHECK(IsAligned(bytes_to_output, kTaggedSize));
  int slots_to_output = bytes_to_output >> kTaggedSizeLog2;

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kVariableRawData, "RawDataForString");
  sink_->PutInt(slots_to_output, "length");

  // Serialize string header (except for map).
  byte* string_start = reinterpret_cast<byte*>(string->address());
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->Put(string_start[i], "StringHeader");
  }

  // Serialize string content.
  sink_->PutRaw(resource, content_size, "StringContent");

  // Since the allocation size is rounded up to object alignment, there
  // may be left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++)
    sink_->Put(static_cast<byte>(0), "StringPadding");
}
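
// Worked example for the function above, assuming an external one-byte
// string of length 5: the imaginary SeqOneByteString occupies
// SeqString::kHeaderSize + 5 bytes, rounded up to object alignment by
// SeqOneByteString::SizeFor(5), and the padding loop zero-fills the
// left-over bytes of the final word.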

// Clear and later restore the next link in the weak cell or allocation site.
// TODO(all): replace this with proper iteration of weak slots in serializer.
class UnlinkWeakNextScope {
 public:
  explicit UnlinkWeakNextScope(Heap* heap, Handle<HeapObject> object) {
    if (object->IsAllocationSite() &&
        Handle<AllocationSite>::cast(object)->HasWeakNext()) {
      object_ = object;
      next_ =
          handle(AllocationSite::cast(*object).weak_next(), heap->isolate());
      Handle<AllocationSite>::cast(object)->set_weak_next(
          ReadOnlyRoots(heap).undefined_value());
    }
  }

  ~UnlinkWeakNextScope() {
    if (!object_.is_null()) {
      Handle<AllocationSite>::cast(object_)->set_weak_next(
          *next_, UPDATE_WEAK_WRITE_BARRIER);
    }
  }

 private:
  Handle<HeapObject> object_;
  Handle<Object> next_;
  DISALLOW_HEAP_ALLOCATION(no_gc_)
};

void Serializer::ObjectSerializer::Serialize() {
  RecursionScope recursion(serializer_);

  // Defer objects as "pending" if they cannot be serialized now, or if we
  // exceed a certain recursion depth. Some objects cannot be deferred.
  if ((recursion.ExceedsMaximum() && CanBeDeferred(*object_)) ||
      serializer_->MustBeDeferred(*object_)) {
    DCHECK(CanBeDeferred(*object_));
    if (FLAG_trace_serializer) {
      PrintF(" Deferring heap object: ");
      object_->ShortPrint();
      PrintF("\n");
    }
    // Deferred objects are considered "pending".
    serializer_->RegisterObjectIsPending(object_);
    serializer_->PutPendingForwardReference(
        *serializer_->forward_refs_per_pending_object_.Find(object_));
    serializer_->QueueDeferredObject(object_);
    return;
  }

  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  if (object_->IsExternalString()) {
    SerializeExternalString();
    return;
  } else if (!ReadOnlyHeap::Contains(*object_)) {
    // Only clear padding for strings outside the read-only heap. Read-only
    // heap strings should have been cleared elsewhere.
    if (object_->IsSeqOneByteString()) {
      // Clear padding bytes at the end. Done here to avoid having to do this
      // at allocation sites in generated code.
      Handle<SeqOneByteString>::cast(object_)->clear_padding();
    } else if (object_->IsSeqTwoByteString()) {
      Handle<SeqTwoByteString>::cast(object_)->clear_padding();
    }
  }
  if (object_->IsJSTypedArray()) {
    SerializeJSTypedArray();
    return;
  } else if (object_->IsJSArrayBuffer()) {
    SerializeJSArrayBuffer();
    return;
  }

  // We don't expect fillers.
  DCHECK(!object_->IsFreeSpaceOrFiller());

  if (object_->IsScript()) {
    // Clear cached line ends.
    Oddball undefined = ReadOnlyRoots(isolate()).undefined_value();
    Handle<Script>::cast(object_)->set_line_ends(undefined);
  }

  SerializeObject();
}

namespace {
SnapshotSpace GetSnapshotSpace(Handle<HeapObject> object) {
  if (V8_ENABLE_THIRD_PARTY_HEAP_BOOL) {
    if (object->IsCode()) {
      return SnapshotSpace::kCode;
    } else if (ReadOnlyHeap::Contains(*object)) {
      return SnapshotSpace::kReadOnlyHeap;
    } else if (object->IsMap()) {
      return SnapshotSpace::kMap;
    } else {
      return SnapshotSpace::kOld;
    }
  } else if (ReadOnlyHeap::Contains(*object)) {
    return SnapshotSpace::kReadOnlyHeap;
  } else {
    AllocationSpace heap_space =
        MemoryChunk::FromHeapObject(*object)->owner_identity();
    // Large code objects are not supported and cannot be expressed by
    // SnapshotSpace.
    DCHECK_NE(heap_space, CODE_LO_SPACE);
    switch (heap_space) {
      case OLD_SPACE:
      // Young generation objects are tenured, as objects that have survived
      // until snapshot building probably deserve to be considered 'old'.
      case NEW_SPACE:
      // Large objects (young and old) are encoded as simply 'old' snapshot
      // objects, as "normal" objects vs large objects is a heap implementation
      // detail and isn't relevant to the snapshot.
      case NEW_LO_SPACE:
      case LO_SPACE:
        return SnapshotSpace::kOld;
      case CODE_SPACE:
        return SnapshotSpace::kCode;
      case MAP_SPACE:
        return SnapshotSpace::kMap;
      case CODE_LO_SPACE:
      case RO_SPACE:
        UNREACHABLE();
    }
  }
}
}  // namespace

void Serializer::ObjectSerializer::SerializeObject() {
  int size = object_->Size();
  Map map = object_->map();

  // Descriptor arrays have complex element weakness, that is dependent on the
  // maps pointing to them. During deserialization, this can cause them to get
  // prematurely trimmed when one of their owners isn't deserialized yet. We
  // work around this by forcing all descriptor arrays to be serialized as
  // "strong", i.e. no custom weakness, and "re-weaken" them in the
  // deserializer once deserialization completes.
  //
  // See also `Deserializer::WeakenDescriptorArrays`.
  if (map == ReadOnlyRoots(isolate()).descriptor_array_map()) {
    map = ReadOnlyRoots(isolate()).strong_descriptor_array_map();
  }
  SnapshotSpace space = GetSnapshotSpace(object_);
  SerializePrologue(space, size, map);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  bytes_processed_so_far_ = kTaggedSize;

  SerializeContent(map, size);
}

void Serializer::ObjectSerializer::SerializeDeferred() {
  const SerializerReference* back_reference =
      serializer_->reference_map()->LookupReference(object_);

  if (back_reference != nullptr) {
    if (FLAG_trace_serializer) {
      PrintF(" Deferred heap object ");
      object_->ShortPrint();
      PrintF(" was already serialized\n");
    }
    return;
  }

  if (FLAG_trace_serializer) {
    PrintF(" Encoding deferred heap object\n");
  }
  Serialize();
}

void Serializer::ObjectSerializer::SerializeContent(Map map, int size) {
  UnlinkWeakNextScope unlink_weak_next(isolate()->heap(), object_);
  if (object_->IsCode()) {
    // For code objects, perform a custom serialization.
    SerializeCode(map, size);
  } else {
    // For other objects, iterate references first.
    object_->IterateBody(map, size, this);
    // Then output data payload, if any.
    OutputRawData(object_->address() + size);
  }
}

void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
                                                 ObjectSlot start,
                                                 ObjectSlot end) {
  VisitPointers(host, MaybeObjectSlot(start), MaybeObjectSlot(end));
}

void Serializer::ObjectSerializer::VisitPointers(HeapObject host,
                                                 MaybeObjectSlot start,
                                                 MaybeObjectSlot end) {
  HandleScope scope(isolate());
  DisallowGarbageCollection no_gc;

  MaybeObjectSlot current = start;
  while (current < end) {
    while (current < end && (*current)->IsSmi()) {
      ++current;
    }
    if (current < end) {
      OutputRawData(current.address());
    }
    // TODO(ishell): Revisit this change once we stick to 32-bit compressed
    // tagged values.
    while (current < end && (*current)->IsCleared()) {
      sink_->Put(kClearedWeakReference, "ClearedWeakReference");
      bytes_processed_so_far_ += kTaggedSize;
      ++current;
    }
    HeapObject current_contents;
    HeapObjectReferenceType reference_type;
    while (current < end &&
           (*current)->GetHeapObject(&current_contents, &reference_type)) {
      // Write a weak prefix if we need it. This has to be done before the
      // potential pending object serialization.
      if (reference_type == HeapObjectReferenceType::WEAK) {
        sink_->Put(kWeakPrefix, "WeakReference");
      }

      Handle<HeapObject> obj = handle(current_contents, isolate());
      if (serializer_->SerializePendingObject(obj)) {
        bytes_processed_so_far_ += kTaggedSize;
        ++current;
        continue;
      }

      RootIndex root_index;
      // Compute repeat count and write repeat prefix if applicable.
      // Repeats are not subject to the write barrier so we can only use
      // immortal immovable root members.
      MaybeObjectSlot repeat_end = current + 1;
      if (repeat_end < end &&
          serializer_->root_index_map()->Lookup(*obj, &root_index) &&
          RootsTable::IsImmortalImmovable(root_index) &&
          *current == *repeat_end) {
        DCHECK_EQ(reference_type, HeapObjectReferenceType::STRONG);
        DCHECK(!Heap::InYoungGeneration(*obj));
        while (repeat_end < end && *repeat_end == *current) {
          repeat_end++;
        }
        int repeat_count = static_cast<int>(repeat_end - current);
        current = repeat_end;
        bytes_processed_so_far_ += repeat_count * kTaggedSize;
        serializer_->PutRepeat(repeat_count);
      } else {
        bytes_processed_so_far_ += kTaggedSize;
        ++current;
      }
      // Now write the object itself.
      serializer_->SerializeObject(obj);
    }
  }
}

void Serializer::ObjectSerializer::OutputExternalReference(Address target,
                                                           int target_size,
                                                           bool sandboxify) {
  DCHECK_LE(target_size, sizeof(target));  // Must fit in Address.
  ExternalReferenceEncoder::Value encoded_reference;
  bool encoded_successfully;

  if (serializer_->allow_unknown_external_references_for_testing()) {
    encoded_successfully =
        serializer_->TryEncodeExternalReference(target).To(&encoded_reference);
  } else {
    encoded_reference = serializer_->EncodeExternalReference(target);
    encoded_successfully = true;
  }

  if (!encoded_successfully) {
    // In this case the serialized snapshot will not be used in a different
    // Isolate and thus the target address will not change between
    // serialization and deserialization. We can serialize seen external
    // references verbatim.
    CHECK(serializer_->allow_unknown_external_references_for_testing());
    CHECK(IsAligned(target_size, kTaggedSize));
    CHECK_LE(target_size, kFixedRawDataCount * kTaggedSize);
    int size_in_tagged = target_size >> kTaggedSizeLog2;
    sink_->Put(FixedRawDataWithSize::Encode(size_in_tagged), "FixedRawData");
    sink_->PutRaw(reinterpret_cast<byte*>(&target), target_size, "Bytes");
  } else if (encoded_reference.is_from_api()) {
    if (V8_HEAP_SANDBOX_BOOL && sandboxify) {
      sink_->Put(kSandboxedApiReference, "SandboxedApiRef");
    } else {
      sink_->Put(kApiReference, "ApiRef");
    }
    sink_->PutInt(encoded_reference.index(), "reference index");
  } else {
    if (V8_HEAP_SANDBOX_BOOL && sandboxify) {
      sink_->Put(kSandboxedExternalReference, "SandboxedExternalRef");
    } else {
      sink_->Put(kExternalReference, "ExternalRef");
    }
    sink_->PutInt(encoded_reference.index(), "reference index");
  }
}

void Serializer::ObjectSerializer::VisitExternalReference(Foreign host,
                                                          Address* p) {
  // "Sandboxify" external reference.
  OutputExternalReference(host.foreign_address(), kExternalPointerSize, true);
  bytes_processed_so_far_ += kExternalPointerSize;
}

class Serializer::ObjectSerializer::RelocInfoObjectPreSerializer {
 public:
  explicit RelocInfoObjectPreSerializer(Serializer* serializer)
      : serializer_(serializer) {}

  void VisitEmbeddedPointer(Code host, RelocInfo* target) {
    Object object = target->target_object();
    serializer_->SerializeObject(handle(HeapObject::cast(object), isolate()));
    num_serialized_objects_++;
  }
  void VisitCodeTarget(Code host, RelocInfo* target) {
#ifdef V8_TARGET_ARCH_ARM
    DCHECK(!RelocInfo::IsRelativeCodeTarget(target->rmode()));
#endif
    Code object = Code::GetCodeFromTargetAddress(target->target_address());
    serializer_->SerializeObject(handle(object, isolate()));
    num_serialized_objects_++;
  }

  void VisitExternalReference(Code host, RelocInfo* rinfo) {}
  void VisitInternalReference(Code host, RelocInfo* rinfo) {}
  void VisitRuntimeEntry(Code host, RelocInfo* reloc) { UNREACHABLE(); }
  void VisitOffHeapTarget(Code host, RelocInfo* target) {}

  int num_serialized_objects() const { return num_serialized_objects_; }

  Isolate* isolate() { return serializer_->isolate(); }

 private:
  Serializer* serializer_;
  int num_serialized_objects_ = 0;
};

void Serializer::ObjectSerializer::VisitEmbeddedPointer(Code host,
                                                        RelocInfo* rinfo) {
  // Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
  // just track the pointer's existence as kTaggedSize in
  // bytes_processed_so_far_.
  // TODO(leszeks): DCHECK that RelocInfoObjectPreSerializer serialized this
  // specific object already.
  bytes_processed_so_far_ += kTaggedSize;
}

void Serializer::ObjectSerializer::VisitExternalReference(Code host,
                                                          RelocInfo* rinfo) {
  Address target = rinfo->target_external_reference();
  DCHECK_NE(target, kNullAddress);  // Code does not reference null.
  DCHECK_IMPLIES(serializer_->EncodeExternalReference(target).is_from_api(),
                 !rinfo->IsCodedSpecially());
  // Don't "sandboxify" external references embedded in the code.
  OutputExternalReference(target, rinfo->target_address_size(), false);
}

void Serializer::ObjectSerializer::VisitInternalReference(Code host,
                                                          RelocInfo* rinfo) {
  Address entry = Handle<Code>::cast(object_)->entry();
  DCHECK_GE(rinfo->target_internal_reference(), entry);
  uintptr_t target_offset = rinfo->target_internal_reference() - entry;
  // TODO(jgruber,v8:11036): We are being permissive for this DCHECK, but
  // consider using raw_instruction_size() instead of raw_body_size() in the
  // future.
  STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
  DCHECK_LE(target_offset, Handle<Code>::cast(object_)->raw_body_size());
  sink_->Put(kInternalReference, "InternalRef");
  sink_->PutInt(target_offset, "internal ref value");
}

void Serializer::ObjectSerializer::VisitRuntimeEntry(Code host,
                                                     RelocInfo* rinfo) {
  // We no longer serialize code that contains runtime entries.
  UNREACHABLE();
}

void Serializer::ObjectSerializer::VisitOffHeapTarget(Code host,
                                                      RelocInfo* rinfo) {
  STATIC_ASSERT(EmbeddedData::kTableSize == Builtins::builtin_count);

  Address addr = rinfo->target_off_heap_target();
  CHECK_NE(kNullAddress, addr);

  Code target = InstructionStream::TryLookupCode(isolate(), addr);
  CHECK(Builtins::IsIsolateIndependentBuiltin(target));

  sink_->Put(kOffHeapTarget, "OffHeapTarget");
  sink_->PutInt(target.builtin_index(), "builtin index");
}

void Serializer::ObjectSerializer::VisitCodeTarget(Code host,
                                                   RelocInfo* rinfo) {
  // Target object should be pre-serialized by RelocInfoObjectPreSerializer, so
  // just track the pointer's existence as kTaggedSize in
  // bytes_processed_so_far_.
  // TODO(leszeks): DCHECK that RelocInfoObjectPreSerializer serialized this
  // specific object already.
  bytes_processed_so_far_ += kTaggedSize;
}

namespace {

// Similar to OutputRawData, but substitutes the given field with the given
// value instead of reading it from the object.
void OutputRawWithCustomField(SnapshotByteSink* sink, Address object_start,
                              int written_so_far, int bytes_to_write,
                              int field_offset, int field_size,
                              const byte* field_value) {
  int offset = field_offset - written_so_far;
  if (0 <= offset && offset < bytes_to_write) {
    DCHECK_GE(bytes_to_write, offset + field_size);
    sink->PutRaw(reinterpret_cast<byte*>(object_start + written_so_far), offset,
                 "Bytes");
    sink->PutRaw(field_value, field_size, "Bytes");
    written_so_far += offset + field_size;
    bytes_to_write -= offset + field_size;
    sink->PutRaw(reinterpret_cast<byte*>(object_start + written_so_far),
                 bytes_to_write, "Bytes");
  } else {
    sink->PutRaw(reinterpret_cast<byte*>(object_start + written_so_far),
                 bytes_to_write, "Bytes");
  }
}
}  // anonymous namespace
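
// E.g. OutputRawData below uses this helper to emit a BytecodeArray's body
// while substituting the bytecode-age field with kNoAgeBytecodeAge, so that
// concurrent GC updates to that field cannot make snapshots
// nondeterministic.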

void Serializer::ObjectSerializer::OutputRawData(Address up_to) {
  Address object_start = object_->address();
  int base = bytes_processed_so_far_;
  int up_to_offset = static_cast<int>(up_to - object_start);
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
  DCHECK(IsAligned(bytes_to_output, kTaggedSize));
  int tagged_to_output = bytes_to_output / kTaggedSize;
  bytes_processed_so_far_ += to_skip;
  DCHECK_GE(to_skip, 0);
  if (bytes_to_output != 0) {
    DCHECK(to_skip == bytes_to_output);
    if (tagged_to_output <= kFixedRawDataCount) {
      sink_->Put(FixedRawDataWithSize::Encode(tagged_to_output),
                 "FixedRawData");
    } else {
      sink_->Put(kVariableRawData, "VariableRawData");
      sink_->PutInt(tagged_to_output, "length");
    }
#ifdef MEMORY_SANITIZER
    // Check that we do not serialize uninitialized memory.
    __msan_check_mem_is_initialized(
        reinterpret_cast<void*>(object_start + base), bytes_to_output);
#endif  // MEMORY_SANITIZER
    if (object_->IsBytecodeArray()) {
      // The bytecode age field can be changed by GC concurrently.
      byte field_value = BytecodeArray::kNoAgeBytecodeAge;
      OutputRawWithCustomField(sink_, object_start, base, bytes_to_output,
                               BytecodeArray::kBytecodeAgeOffset,
                               sizeof(field_value), &field_value);
    } else if (object_->IsDescriptorArray()) {
      // The number of marked descriptors field can be changed by GC
      // concurrently.
      byte field_value[2];
      field_value[0] = 0;
      field_value[1] = 0;
      OutputRawWithCustomField(
          sink_, object_start, base, bytes_to_output,
          DescriptorArray::kRawNumberOfMarkedDescriptorsOffset,
          sizeof(field_value), field_value);
    } else {
      sink_->PutRaw(reinterpret_cast<byte*>(object_start + base),
                    bytes_to_output, "Bytes");
    }
  }
}

void Serializer::ObjectSerializer::SerializeCode(Map map, int size) {
  static const int kWipeOutModeMask =
      RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
      RelocInfo::ModeMask(RelocInfo::FULL_EMBEDDED_OBJECT) |
      RelocInfo::ModeMask(RelocInfo::COMPRESSED_EMBEDDED_OBJECT) |
      RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
      RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
      RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
      RelocInfo::ModeMask(RelocInfo::OFF_HEAP_TARGET) |
      RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);

  DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
  Handle<Code> on_heap_code = Handle<Code>::cast(object_);

  // With pointer compression enabled, normal accessors no longer work for
  // off-heap objects, so we have to get the relocation info data via the
  // on-heap code object.
  ByteArray relocation_info = on_heap_code->unchecked_relocation_info();

  // To make snapshots reproducible, we make a copy of the code object
  // and wipe all pointers in the copy, which we then serialize.
  Code off_heap_code = serializer_->CopyCode(*on_heap_code);
  for (RelocIterator it(off_heap_code, relocation_info, kWipeOutModeMask);
       !it.done(); it.next()) {
    RelocInfo* rinfo = it.rinfo();
    rinfo->WipeOut();
  }
  // We need to wipe out the header fields *after* wiping out the
  // relocations, because some of these fields are needed for the latter.
  off_heap_code.WipeOutHeader();

  // Initially skip serializing the code header. We'll serialize it after the
  // Code body, so that the various fields the Code needs for iteration are
  // already valid.
  sink_->Put(kCodeBody, "kCodeBody");

  // Now serialize the wiped off-heap Code, as length + data.
  Address start = off_heap_code.address() + Code::kDataStart;
  int bytes_to_output = size - Code::kDataStart;
  DCHECK(IsAligned(bytes_to_output, kTaggedSize));
  int tagged_to_output = bytes_to_output / kTaggedSize;

  sink_->PutInt(tagged_to_output, "length");

#ifdef MEMORY_SANITIZER
  // Check that we do not serialize uninitialized memory.
  __msan_check_mem_is_initialized(reinterpret_cast<void*>(start),
                                  bytes_to_output);
#endif  // MEMORY_SANITIZER
  sink_->PutRaw(reinterpret_cast<byte*>(start), bytes_to_output, "Code");

  // Manually serialize the code header. We don't use Code::BodyDescriptor
  // here as we don't yet want to walk the RelocInfos.
  DCHECK_EQ(HeapObject::kHeaderSize, bytes_processed_so_far_);
  VisitPointers(*on_heap_code, on_heap_code->RawField(HeapObject::kHeaderSize),
                on_heap_code->RawField(Code::kDataStart));
  DCHECK_EQ(bytes_processed_so_far_, Code::kDataStart);

  // Now serialize RelocInfos. We can't allocate during a RelocInfo walk during
  // deserialization, so we have two passes for RelocInfo serialization:
  //   1. A pre-serializer which serializes all allocatable objects in the
  //      RelocInfo, followed by a kSynchronize bytecode, and
  //   2. A walk of the RelocInfo with this serializer, serializing any objects
  //      implicitly as offsets into the pre-serializer's object array.
  // This way, the deserializer can deserialize the allocatable objects first,
  // without walking RelocInfo, re-build the pre-serializer's object array, and
  // only then walk the RelocInfo itself.
  // TODO(leszeks): We only really need to pre-serialize objects which need
  // serialization, i.e. no backrefs or roots.
  RelocInfoObjectPreSerializer pre_serializer(serializer_);
  for (RelocIterator it(*on_heap_code, relocation_info,
                        Code::BodyDescriptor::kRelocModeMask);
       !it.done(); it.next()) {
    it.rinfo()->Visit(&pre_serializer);
  }
  // Mark that the pre-serialization finished with a kSynchronize bytecode.
  sink_->Put(kSynchronize, "PreSerializationFinished");

  // Finally serialize all RelocInfo objects in the on-heap Code, knowing that
  // we will not do a recursive serialization.
  // TODO(leszeks): Add a scope that DCHECKs this.
  for (RelocIterator it(*on_heap_code, relocation_info,
                        Code::BodyDescriptor::kRelocModeMask);
       !it.done(); it.next()) {
    it.rinfo()->Visit(this);
  }

  // We record a kTaggedSize for every object encountered during the
  // serialization, so DCHECK that bytes_processed_so_far_ matches the expected
  // number of bytes (i.e. the code header + a tagged size per pre-serialized
  // object).
  DCHECK_EQ(
      bytes_processed_so_far_,
      Code::kDataStart + kTaggedSize * pre_serializer.num_serialized_objects());
}
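
// Illustrative layout of the stream emitted above for a single Code object,
// following its prologue:
//
//   kCodeBody, <body length in tagged words>, <wiped raw code body>,
//   <code header slots>, <pre-serialized RelocInfo objects>, kSynchronize,
//   <remaining RelocInfo entries>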

Serializer::HotObjectsList::HotObjectsList(Heap* heap) : heap_(heap) {
  strong_roots_entry_ =
      heap->RegisterStrongRoots(FullObjectSlot(&circular_queue_[0]),
                                FullObjectSlot(&circular_queue_[kSize]));
}
Serializer::HotObjectsList::~HotObjectsList() {
  heap_->UnregisterStrongRoots(strong_roots_entry_);
}

}  // namespace internal
}  // namespace v8