1 // Copyright 2016 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/snapshot/serializer.h"
6
7 #include "src/assembler-inl.h"
8 #include "src/heap/heap-inl.h"
9 #include "src/macro-assembler.h"
10 #include "src/snapshot/natives.h"
11
12 namespace v8 {
13 namespace internal {
14
// Sets up per-space chunk bookkeeping and, when OBJECT_PRINT is enabled,
// the per-instance-type statistics arrays used by OutputStatistics().
Serializer::Serializer(Isolate* isolate)
    : isolate_(isolate),
      external_reference_encoder_(isolate),
      root_index_map_(isolate),
      recursion_depth_(0),
      code_address_map_(NULL),
      num_maps_(0),
      large_objects_total_size_(0),
      seen_large_objects_index_(0) {
  // The serializer is meant to be used only to generate initial heap images
  // from a context in which there is only one isolate.
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    pending_chunk_[i] = 0;
    // A reservation chunk may never span multiple pages, so cap each
    // space's chunk size at that space's page area size.
    max_chunk_size_[i] = static_cast<uint32_t>(
        MemoryAllocator::PageAreaSize(static_cast<AllocationSpace>(i)));
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    instance_type_count_ = NewArray<int>(kInstanceTypes);
    instance_type_size_ = NewArray<size_t>(kInstanceTypes);
    for (int i = 0; i < kInstanceTypes; i++) {
      instance_type_count_[i] = 0;
      instance_type_size_[i] = 0;
    }
  } else {
    // Statistics disabled: leave the arrays unallocated; the destructor
    // checks for NULL before freeing.
    instance_type_count_ = NULL;
    instance_type_size_ = NULL;
  }
#endif  // OBJECT_PRINT
}
46
~Serializer()47 Serializer::~Serializer() {
48 if (code_address_map_ != NULL) delete code_address_map_;
49 #ifdef OBJECT_PRINT
50 if (instance_type_count_ != NULL) {
51 DeleteArray(instance_type_count_);
52 DeleteArray(instance_type_size_);
53 }
54 #endif // OBJECT_PRINT
55 }
56
57 #ifdef OBJECT_PRINT
CountInstanceType(Map * map,int size)58 void Serializer::CountInstanceType(Map* map, int size) {
59 int instance_type = map->instance_type();
60 instance_type_count_[instance_type]++;
61 instance_type_size_[instance_type] += size;
62 }
63 #endif // OBJECT_PRINT
64
// Prints per-space byte totals and, with OBJECT_PRINT, per-instance-type
// counts and sizes collected during serialization. No-op unless
// --serialization_statistics is set.
void Serializer::OutputStatistics(const char* name) {
  if (!FLAG_serialization_statistics) return;
  PrintF("%s:\n", name);
  PrintF(" Spaces (bytes):\n");
  for (int space = 0; space < kNumberOfSpaces; space++) {
    PrintF("%16s", AllocationSpaceName(static_cast<AllocationSpace>(space)));
  }
  PrintF("\n");
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    // Space total = bytes in the still-open chunk plus all sealed chunks.
    size_t s = pending_chunk_[space];
    for (uint32_t chunk_size : completed_chunks_[space]) s += chunk_size;
    PrintF("%16" PRIuS, s);
  }
  // Large-object space is tracked as a single running total.
  PrintF("%16d\n", large_objects_total_size_);
#ifdef OBJECT_PRINT
  PrintF(" Instance types (count and bytes):\n");
#define PRINT_INSTANCE_TYPE(Name)                                \
  if (instance_type_count_[Name]) {                              \
    PrintF("%10d %10" PRIuS " %s\n", instance_type_count_[Name], \
           instance_type_size_[Name], #Name);                    \
  }
  INSTANCE_TYPE_LIST(PRINT_INSTANCE_TYPE)
#undef PRINT_INSTANCE_TYPE
  PrintF("\n");
#endif  // OBJECT_PRINT
}
91
SerializeDeferredObjects()92 void Serializer::SerializeDeferredObjects() {
93 while (deferred_objects_.length() > 0) {
94 HeapObject* obj = deferred_objects_.RemoveLast();
95 ObjectSerializer obj_serializer(this, obj, &sink_, kPlain, kStartOfObject);
96 obj_serializer.SerializeDeferred();
97 }
98 sink_.Put(kSynchronize, "Finished with deferred objects");
99 }
100
VisitPointers(Object ** start,Object ** end)101 void Serializer::VisitPointers(Object** start, Object** end) {
102 for (Object** current = start; current < end; current++) {
103 if ((*current)->IsSmi()) {
104 PutSmi(Smi::cast(*current));
105 } else {
106 SerializeObject(HeapObject::cast(*current), kPlain, kStartOfObject, 0);
107 }
108 }
109 }
110
// Flattens the per-space chunk sizes into a flat list of reservations the
// deserializer uses to pre-allocate memory. The last reservation of each
// space is tagged so the deserializer can tell where a space's list ends.
void Serializer::EncodeReservations(
    List<SerializedData::Reservation>* out) const {
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    for (int j = 0; j < completed_chunks_[i].length(); j++) {
      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
    }

    // Emit the still-open chunk, or a zero-sized placeholder when the
    // space produced no chunks at all (every space needs an entry).
    if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
      out->Add(SerializedData::Reservation(pending_chunk_[i]));
    }
    out->last().mark_as_last();
  }
  // Map space: maps are fixed-size, so reserve one slot per serialized map.
  out->Add(SerializedData::Reservation(num_maps_ * Map::kSize));
  out->last().mark_as_last();
  // Large-object space: a single reservation covering the running total.
  out->Add(SerializedData::Reservation(large_objects_total_size_));
  out->last().mark_as_last();
}
128
129 #ifdef DEBUG
// Debug-only sanity check: returns true iff |reference| points at memory
// that has already been handed out by this serializer's allocators, i.e.
// the referenced object really was serialized earlier.
bool Serializer::BackReferenceIsAlreadyAllocated(
    SerializerReference reference) {
  DCHECK(reference.is_back_reference());
  AllocationSpace space = reference.space();
  if (space == LO_SPACE) {
    // Large objects are numbered sequentially as they are seen.
    return reference.large_object_index() < seen_large_objects_index_;
  } else if (space == MAP_SPACE) {
    return reference.map_index() < num_maps_;
  } else {
    int chunk_index = reference.chunk_index();
    if (chunk_index == completed_chunks_[space].length()) {
      // Reference into the still-open chunk: offset must be below the
      // current allocation watermark.
      return reference.chunk_offset() < pending_chunk_[space];
    } else {
      // Reference into a sealed chunk: offset must fit in that chunk.
      return chunk_index < completed_chunks_[space].length() &&
             reference.chunk_offset() < completed_chunks_[space][chunk_index];
    }
  }
}
148 #endif // DEBUG
149
// Tries to encode |obj| as an index into the small working set of recently
// serialized ("hot") objects. Returns false if the object is not hot or
// the encoding mode cannot express hot-object references.
bool Serializer::SerializeHotObject(HeapObject* obj, HowToCode how_to_code,
                                    WhereToPoint where_to_point, int skip) {
  if (how_to_code != kPlain || where_to_point != kStartOfObject) return false;
  // Encode a reference to a hot object by its index in the working set.
  int index = hot_objects_.Find(obj);
  if (index == HotObjectsList::kNotFound) return false;
  DCHECK(index >= 0 && index < kNumberOfHotObjects);
  if (FLAG_trace_serializer) {
    PrintF(" Encoding hot object %d:", index);
    obj->ShortPrint();
    PrintF("\n");
  }
  if (skip != 0) {
    // Fold the skip into the bytecode to avoid a separate kSkip entry.
    sink_.Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
    sink_.PutInt(skip, "HotObjectSkipDistance");
  } else {
    sink_.Put(kHotObject + index, "HotObject");
  }
  return true;
}
// Tries to encode |obj| as a reference to an already-serialized object.
// Returns false if the object has not been seen before.
bool Serializer::SerializeBackReference(HeapObject* obj, HowToCode how_to_code,
                                        WhereToPoint where_to_point, int skip) {
  SerializerReference reference = reference_map_.Lookup(obj);
  if (!reference.is_valid()) return false;
  // Encode the location of an already deserialized object in order to write
  // its location into a later object. We can encode the location as an
  // offset from the start of the deserialized objects or as an offset
  // backwards from the current allocation pointer.
  if (reference.is_attached_reference()) {
    FlushSkip(skip);
    if (FLAG_trace_serializer) {
      PrintF(" Encoding attached reference %d\n",
             reference.attached_reference_index());
    }
    PutAttachedReference(reference, how_to_code, where_to_point);
  } else {
    DCHECK(reference.is_back_reference());
    if (FLAG_trace_serializer) {
      PrintF(" Encoding back reference to: ");
      obj->ShortPrint();
      PrintF("\n");
    }

    // The deserializer may have inserted alignment filler before the
    // object; emit the matching prefix so offsets line up.
    PutAlignmentPrefix(obj);
    AllocationSpace space = reference.space();
    if (skip == 0) {
      sink_.Put(kBackref + how_to_code + where_to_point + space, "BackRef");
    } else {
      sink_.Put(kBackrefWithSkip + how_to_code + where_to_point + space,
                "BackRefWithSkip");
      sink_.PutInt(skip, "BackRefSkipDistance");
    }
    PutBackReference(obj, reference);
  }
  return true;
}
206
// Encodes a reference to root-list entry |root_index|. The most common
// roots get a compact single-byte encoding; everything else is written as
// a generic root-array reference and added to the hot-object working set.
void Serializer::PutRoot(int root_index, HeapObject* object,
                         SerializerDeserializer::HowToCode how_to_code,
                         SerializerDeserializer::WhereToPoint where_to_point,
                         int skip) {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding root %d:", root_index);
    object->ShortPrint();
    PrintF("\n");
  }

  // Assert that the first 32 root array items are a conscious choice. They are
  // chosen so that the most common ones can be encoded more efficiently.
  STATIC_ASSERT(Heap::kEmptyDescriptorArrayRootIndex ==
                kNumberOfRootArrayConstants - 1);

  // Compact encoding is only valid for plain start-of-object references to
  // roots that cannot move (i.e. are not in new space).
  if (how_to_code == kPlain && where_to_point == kStartOfObject &&
      root_index < kNumberOfRootArrayConstants &&
      !isolate()->heap()->InNewSpace(object)) {
    if (skip == 0) {
      sink_.Put(kRootArrayConstants + root_index, "RootConstant");
    } else {
      sink_.Put(kRootArrayConstantsWithSkip + root_index, "RootConstant");
      sink_.PutInt(skip, "SkipInPutRoot");
    }
  } else {
    FlushSkip(skip);
    sink_.Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
    sink_.PutInt(root_index, "root_index");
    hot_objects_.Add(object);
  }
}
238
// Serializes a smi as one pointer-sized chunk of raw data. Note the
// &smi: it takes the address of the parameter slot, so the bytes written
// are those of the tagged smi value itself.
void Serializer::PutSmi(Smi* smi) {
  sink_.Put(kOnePointerRawData, "Smi");
  byte* bytes = reinterpret_cast<byte*>(&smi);
  for (int i = 0; i < kPointerSize; i++) sink_.Put(bytes[i], "Byte");
}
244
// Writes the packed back-reference value for |object| and promotes it into
// the hot-object working set, since a referenced object is likely to be
// referenced again soon.
void Serializer::PutBackReference(HeapObject* object,
                                  SerializerReference reference) {
  DCHECK(BackReferenceIsAlreadyAllocated(reference));
  sink_.PutInt(reference.back_reference(), "BackRefValue");
  hot_objects_.Add(object);
}
251
// Encodes a reference to an object attached from outside the snapshot
// (resolved by index at deserialization time).
void Serializer::PutAttachedReference(SerializerReference reference,
                                      HowToCode how_to_code,
                                      WhereToPoint where_to_point) {
  DCHECK(reference.is_attached_reference());
  // Only these four encoding combinations are meaningful for attached
  // references; anything else indicates a caller bug.
  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
         (how_to_code == kFromCode && where_to_point == kStartOfObject) ||
         (how_to_code == kFromCode && where_to_point == kInnerPointer));
  sink_.Put(kAttachedReference + how_to_code + where_to_point, "AttachedRef");
  sink_.PutInt(reference.attached_reference_index(), "AttachedRefIndex");
}
263
// Emits an alignment-prefix bytecode if |object| requires stronger than
// word alignment. Returns the maximum number of filler bytes the
// deserializer may insert, so callers can reserve that much extra space.
int Serializer::PutAlignmentPrefix(HeapObject* object) {
  AllocationAlignment alignment = object->RequiredAlignment();
  if (alignment != kWordAligned) {
    // The three non-word alignments map onto consecutive prefix opcodes.
    DCHECK(1 <= alignment && alignment <= 3);
    byte prefix = (kAlignmentPrefix - 1) + alignment;
    sink_.Put(prefix, "Alignment");
    return Heap::GetMaximumFillToAlign(alignment);
  }
  return 0;
}
274
AllocateLargeObject(int size)275 SerializerReference Serializer::AllocateLargeObject(int size) {
276 // Large objects are allocated one-by-one when deserializing. We do not
277 // have to keep track of multiple chunks.
278 large_objects_total_size_ += size;
279 return SerializerReference::LargeObjectReference(seen_large_objects_index_++);
280 }
281
// Hands out the next sequential map slot; maps are fixed-size and are
// allocated one-by-one when deserializing.
SerializerReference Serializer::AllocateMap() {
  // Maps are allocated one-by-one when deserializing.
  return SerializerReference::MapReference(num_maps_++);
}
286
// Reserves |size| bytes in |space| for the object being serialized and
// returns a back reference (space, chunk index, offset) to the spot. If
// the current chunk cannot fit the object, it is sealed and a new chunk
// is started, since a chunk may never span more than one page.
SerializerReference Serializer::Allocate(AllocationSpace space, int size) {
  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
  DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
  uint32_t new_chunk_size = pending_chunk_[space] + size;
  if (new_chunk_size > max_chunk_size(space)) {
    // The new chunk size would not fit onto a single page. Complete the
    // current chunk and start a new one.
    sink_.Put(kNextChunk, "NextChunk");
    sink_.Put(space, "NextChunkSpace");
    completed_chunks_[space].Add(pending_chunk_[space]);
    pending_chunk_[space] = 0;
    new_chunk_size = size;
  }
  uint32_t offset = pending_chunk_[space];
  pending_chunk_[space] = new_chunk_size;
  return SerializerReference::BackReference(
      space, completed_chunks_[space].length(), offset);
}
305
Pad()306 void Serializer::Pad() {
307 // The non-branching GetInt will read up to 3 bytes too far, so we need
308 // to pad the snapshot to make sure we don't read over the end.
309 for (unsigned i = 0; i < sizeof(int32_t) - 1; i++) {
310 sink_.Put(kNop, "Padding");
311 }
312 // Pad up to pointer size for checksum.
313 while (!IsAligned(sink_.Position(), kPointerAlignment)) {
314 sink_.Put(kNop, "Padding");
315 }
316 }
317
// Lazily creates the code-address-to-name map used for code logging.
// Logging and counters must be initialized on the isolate first.
void Serializer::InitializeCodeAddressMap() {
  isolate_->InitializeLoggingAndCounters();
  code_address_map_ = new CodeAddressMap(isolate_);
}
322
// Copies |code| into a reused scratch buffer and returns the copy viewed
// as a Code object, so it can be mutated (e.g. pointers wiped for
// reproducible snapshots) without touching the live heap object.
Code* Serializer::CopyCode(Code* code) {
  code_buffer_.Rewind(0);  // Clear buffer without deleting backing store.
  int size = code->CodeSize();
  code_buffer_.AddAll(Vector<byte>(code->address(), size));
  // NOTE(review): assumes the buffer's backing store is suitably aligned
  // to masquerade as a heap object — confirm against List's allocator.
  return Code::cast(HeapObject::FromAddress(&code_buffer_.first()));
}
329
HasNotExceededFirstPageOfEachSpace()330 bool Serializer::HasNotExceededFirstPageOfEachSpace() {
331 for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
332 if (!completed_chunks_[i].is_empty()) return false;
333 }
334 return true;
335 }
336
// Emits the allocation bytecode for a new object (space, size, then the
// map as the object's first word) and registers a back reference so later
// occurrences of the object can point at this one.
void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                     int size, Map* map) {
  if (serializer_->code_address_map_) {
    // Log the code name at the current sink position for profiling.
    const char* code_name =
        serializer_->code_address_map_->Lookup(object_->address());
    LOG(serializer_->isolate_,
        CodeNameEvent(object_->address(), sink_->Position(), code_name));
  }

  SerializerReference back_reference;
  if (space == LO_SPACE) {
    sink_->Put(kNewObject + reference_representation_ + space,
               "NewLargeObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
    // Large code objects need an executable memory chunk.
    if (object_->IsCode()) {
      sink_->Put(EXECUTABLE, "executable large object");
    } else {
      sink_->Put(NOT_EXECUTABLE, "not executable large object");
    }
    back_reference = serializer_->AllocateLargeObject(size);
  } else if (space == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    back_reference = serializer_->AllocateMap();
    sink_->Put(kNewObject + reference_representation_ + space, "NewMap");
    // This is redundant, but we include it anyways.
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  } else {
    // Account for any filler the deserializer may insert for alignment.
    int fill = serializer_->PutAlignmentPrefix(object_);
    back_reference = serializer_->Allocate(space, size + fill);
    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
  }

#ifdef OBJECT_PRINT
  if (FLAG_serialization_statistics) {
    serializer_->CountInstanceType(map, size);
  }
#endif  // OBJECT_PRINT

  // Mark this object as already serialized.
  serializer_->reference_map()->Add(object_, back_reference);

  // Serialize the map (first word of the object).
  serializer_->SerializeObject(map, kPlain, kStartOfObject, 0);
}
382
// Serializes an external string as if it were an ordinary sequential
// string with the same content, since external resources generally cannot
// be recreated at deserialization time.
void Serializer::ObjectSerializer::SerializeExternalString() {
  // Instead of serializing this as an external string, we serialize
  // an imaginary sequential string with the same content.
  Isolate* isolate = serializer_->isolate();
  DCHECK(object_->IsExternalString());
  // Native source strings are handled elsewhere (their resources can be
  // recreated), so they must not reach this path.
  DCHECK(object_->map() != isolate->heap()->native_source_string_map());
  ExternalString* string = ExternalString::cast(object_);
  int length = string->length();
  Map* map;
  int content_size;
  int allocation_size;
  const byte* resource;
  // Find the map and size for the imaginary sequential string.
  bool internalized = object_->IsInternalizedString();
  if (object_->IsExternalOneByteString()) {
    map = internalized ? isolate->heap()->one_byte_internalized_string_map()
                       : isolate->heap()->one_byte_string_map();
    allocation_size = SeqOneByteString::SizeFor(length);
    content_size = length * kCharSize;
    resource = reinterpret_cast<const byte*>(
        ExternalOneByteString::cast(string)->resource()->data());
  } else {
    map = internalized ? isolate->heap()->internalized_string_map()
                       : isolate->heap()->string_map();
    allocation_size = SeqTwoByteString::SizeFor(length);
    content_size = length * kShortSize;
    resource = reinterpret_cast<const byte*>(
        ExternalTwoByteString::cast(string)->resource()->data());
  }

  // Oversized strings must go to large-object space.
  AllocationSpace space =
      (allocation_size > kMaxRegularHeapObjectSize) ? LO_SPACE : OLD_SPACE;
  SerializePrologue(space, allocation_size, map);

  // Output the rest of the imaginary string.
  int bytes_to_output = allocation_size - HeapObject::kHeaderSize;

  // Output raw data header. Do not bother with common raw length cases here.
  sink_->Put(kVariableRawData, "RawDataForString");
  sink_->PutInt(bytes_to_output, "length");

  // Serialize string header (except for map).
  Address string_start = string->address();
  for (int i = HeapObject::kHeaderSize; i < SeqString::kHeaderSize; i++) {
    sink_->PutSection(string_start[i], "StringHeader");
  }

  // Serialize string content.
  sink_->PutRaw(resource, content_size, "StringContent");

  // Since the allocation size is rounded up to object alignment, there
  // maybe left-over bytes that need to be padded.
  int padding_size = allocation_size - SeqString::kHeaderSize - content_size;
  DCHECK(0 <= padding_size && padding_size < kObjectAlignment);
  for (int i = 0; i < padding_size; i++) sink_->PutSection(0, "StringPadding");

  // Tell the deserializer to skip over the original external-string body,
  // which was replaced above by the sequential representation.
  sink_->Put(kSkip, "SkipAfterString");
  sink_->PutInt(bytes_to_output, "SkipDistance");
}
442
443 // Clear and later restore the next link in the weak cell or allocation site.
444 // TODO(all): replace this with proper iteration of weak slots in serializer.
// RAII scope that temporarily clears the weak "next" link of a WeakCell
// or AllocationSite while its body is serialized, and restores it on
// destruction. For any other object type it is a no-op (object_ stays
// nullptr).
class UnlinkWeakNextScope {
 public:
  explicit UnlinkWeakNextScope(HeapObject* object) : object_(nullptr) {
    if (object->IsWeakCell()) {
      object_ = object;
      next_ = WeakCell::cast(object)->next();
      WeakCell::cast(object)->clear_next(object->GetHeap()->the_hole_value());
    } else if (object->IsAllocationSite()) {
      object_ = object;
      next_ = AllocationSite::cast(object)->weak_next();
      AllocationSite::cast(object)->set_weak_next(
          object->GetHeap()->undefined_value());
    }
  }

  ~UnlinkWeakNextScope() {
    if (object_ != nullptr) {
      // Restore the saved link with a weak write barrier, matching the
      // weakness of the original slot.
      if (object_->IsWeakCell()) {
        WeakCell::cast(object_)->set_next(next_, UPDATE_WEAK_WRITE_BARRIER);
      } else {
        AllocationSite::cast(object_)->set_weak_next(next_,
                                                     UPDATE_WEAK_WRITE_BARRIER);
      }
    }
  }

 private:
  HeapObject* object_;  // nullptr unless a link was cleared.
  Object* next_;        // Saved link; only valid when object_ != nullptr.
  DisallowHeapAllocation no_gc_;  // A GC could move the saved objects.
};
476
// Serializes object_ in full: prologue (allocation + map), then the body
// via IterateBody, with special handling for scripts, external strings,
// and deep recursion (deferral).
void Serializer::ObjectSerializer::Serialize() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  // We cannot serialize typed array objects correctly.
  DCHECK(!object_->IsJSTypedArray());

  // We don't expect fillers.
  DCHECK(!object_->IsFiller());

  if (object_->IsScript()) {
    // Clear cached line ends.
    Object* undefined = serializer_->isolate()->heap()->undefined_value();
    Script::cast(object_)->set_line_ends(undefined);
  }

  if (object_->IsExternalString()) {
    Heap* heap = serializer_->isolate()->heap();
    if (object_->map() != heap->native_source_string_map()) {
      // Usually we cannot recreate resources for external strings. To work
      // around this, external strings are serialized to look like ordinary
      // sequential strings.
      // The exception are native source code strings, since we can recreate
      // their resources. In that case we fall through and leave it to
      // VisitExternalOneByteString further down.
      SerializeExternalString();
      return;
    }
  }

  int size = object_->Size();
  Map* map = object_->map();
  AllocationSpace space =
      MemoryChunk::FromAddress(object_->address())->owner()->identity();
  SerializePrologue(space, size, map);

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  // The map (first word) was already emitted by the prologue.
  bytes_processed_so_far_ = kPointerSize;

  RecursionScope recursion(serializer_);
  // Objects that are immediately post processed during deserialization
  // cannot be deferred, since post processing requires the object content.
  if (recursion.ExceedsMaximum() && CanBeDeferred(object_)) {
    serializer_->QueueDeferredObject(object_);
    sink_->Put(kDeferred, "Deferring object content");
    return;
  }

  // Temporarily sever weak links so they are not serialized as strong.
  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  // Flush any raw bytes remaining after the last tagged slot.
  OutputRawData(object_->address() + size);
}
534
// Serializes the body of an object whose content was deferred during the
// main pass. The object was already allocated then, so this emits a
// back reference to that allocation followed by the body.
void Serializer::ObjectSerializer::SerializeDeferred() {
  if (FLAG_trace_serializer) {
    PrintF(" Encoding deferred heap object: ");
    object_->ShortPrint();
    PrintF("\n");
  }

  int size = object_->Size();
  Map* map = object_->map();
  SerializerReference back_reference =
      serializer_->reference_map()->Lookup(object_);
  DCHECK(back_reference.is_back_reference());

  // Serialize the rest of the object.
  CHECK_EQ(0, bytes_processed_so_far_);
  // The map was serialized during the main pass; skip the first word.
  bytes_processed_so_far_ = kPointerSize;

  serializer_->PutAlignmentPrefix(object_);
  sink_->Put(kNewObject + back_reference.space(), "deferred object");
  serializer_->PutBackReference(object_, back_reference);
  sink_->PutInt(size >> kPointerSizeLog2, "deferred object size");

  // Temporarily sever weak links so they are not serialized as strong.
  UnlinkWeakNextScope unlink_weak_next(object_);

  object_->IterateBody(map->instance_type(), size, this);
  // Flush any raw bytes remaining after the last tagged slot.
  OutputRawData(object_->address() + size);
}
562
VisitPointers(Object ** start,Object ** end)563 void Serializer::ObjectSerializer::VisitPointers(Object** start, Object** end) {
564 Object** current = start;
565 while (current < end) {
566 while (current < end && (*current)->IsSmi()) current++;
567 if (current < end) OutputRawData(reinterpret_cast<Address>(current));
568
569 while (current < end && !(*current)->IsSmi()) {
570 HeapObject* current_contents = HeapObject::cast(*current);
571 int root_index = serializer_->root_index_map()->Lookup(current_contents);
572 // Repeats are not subject to the write barrier so we can only use
573 // immortal immovable root members. They are never in new space.
574 if (current != start && root_index != RootIndexMap::kInvalidRootIndex &&
575 Heap::RootIsImmortalImmovable(root_index) &&
576 current_contents == current[-1]) {
577 DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
578 int repeat_count = 1;
579 while (¤t[repeat_count] < end - 1 &&
580 current[repeat_count] == current_contents) {
581 repeat_count++;
582 }
583 current += repeat_count;
584 bytes_processed_so_far_ += repeat_count * kPointerSize;
585 if (repeat_count > kNumberOfFixedRepeat) {
586 sink_->Put(kVariableRepeat, "VariableRepeat");
587 sink_->PutInt(repeat_count, "repeat count");
588 } else {
589 sink_->Put(kFixedRepeatStart + repeat_count, "FixedRepeat");
590 }
591 } else {
592 serializer_->SerializeObject(current_contents, kPlain, kStartOfObject,
593 0);
594 bytes_processed_so_far_ += kPointerSize;
595 current++;
596 }
597 }
598 }
599 }
600
// Serializes an object embedded in code via relocation info, using the
// code-aware encoding when the reloc entry is specially coded.
void Serializer::ObjectSerializer::VisitEmbeddedPointer(RelocInfo* rinfo) {
  // Flush raw bytes up to the slot and carry the residual skip forward.
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  Object* object = rinfo->target_object();
  serializer_->SerializeObject(HeapObject::cast(object), how_to_code,
                               kStartOfObject, skip);
  bytes_processed_so_far_ += rinfo->target_address_size();
}
610
// Serializes an external reference stored in an ordinary tagged slot,
// encoding the target address as an external-reference id.
void Serializer::ObjectSerializer::VisitExternalReference(Address* p) {
  int skip = OutputRawData(reinterpret_cast<Address>(p),
                           kCanReturnSkipInsteadOfSkipping);
  sink_->Put(kExternalReference + kPlain + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = *p;
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += kPointerSize;
}
620
// Serializes an external reference recorded in code relocation info,
// encoding the target address as an external-reference id.
void Serializer::ObjectSerializer::VisitExternalReference(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_external_reference();
  DCHECK_NOT_NULL(target);  // Code does not reference null.
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}
632
// Serializes a code-internal reference as a pair of offsets from the code
// entry (pc offset and target offset), since skip-based encoding cannot
// be used here (see comment below).
void Serializer::ObjectSerializer::VisitInternalReference(RelocInfo* rinfo) {
  // We can only reference to internal references of code that has been output.
  DCHECK(object_->IsCode() && code_has_been_output_);
  // We do not use skip from last patched pc to find the pc to patch, since
  // target_address_address may not return addresses in ascending order when
  // used for internal references. External references may be stored at the
  // end of the code in the constant pool, whereas internal references are
  // inline. That would cause the skip to be negative. Instead, we store the
  // offset from code entry.
  Address entry = Code::cast(object_)->entry();
  intptr_t pc_offset = rinfo->target_internal_reference_address() - entry;
  intptr_t target_offset = rinfo->target_internal_reference() - entry;
  // Both offsets must lie within the instruction stream.
  DCHECK(0 <= pc_offset &&
         pc_offset <= Code::cast(object_)->instruction_size());
  DCHECK(0 <= target_offset &&
         target_offset <= Code::cast(object_)->instruction_size());
  sink_->Put(rinfo->rmode() == RelocInfo::INTERNAL_REFERENCE
                 ? kInternalReference
                 : kInternalReferenceEncoded,
             "InternalRef");
  sink_->PutInt(static_cast<uintptr_t>(pc_offset), "internal ref address");
  sink_->PutInt(static_cast<uintptr_t>(target_offset), "internal ref value");
}
656
// Serializes a runtime-entry call target in code as an external
// reference id (runtime entries are addresses outside the heap).
void Serializer::ObjectSerializer::VisitRuntimeEntry(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->target_address_address(),
                           kCanReturnSkipInsteadOfSkipping);
  HowToCode how_to_code = rinfo->IsCodedSpecially() ? kFromCode : kPlain;
  sink_->Put(kExternalReference + how_to_code + kStartOfObject, "ExternalRef");
  sink_->PutInt(skip, "SkipB4ExternalRef");
  Address target = rinfo->target_address();
  sink_->PutInt(serializer_->EncodeExternalReference(target), "reference id");
  bytes_processed_so_far_ += rinfo->target_address_size();
}
667
VisitCodeTarget(RelocInfo * rinfo)668 void Serializer::ObjectSerializer::VisitCodeTarget(RelocInfo* rinfo) {
669 int skip = OutputRawData(rinfo->target_address_address(),
670 kCanReturnSkipInsteadOfSkipping);
671 Code* object = Code::GetCodeFromTargetAddress(rinfo->target_address());
672 serializer_->SerializeObject(object, kFromCode, kInnerPointer, skip);
673 bytes_processed_so_far_ += rinfo->target_address_size();
674 }
675
// Serializes a code-entry slot (a pointer to a code object's entry point)
// as an inner-pointer reference to that Code object.
void Serializer::ObjectSerializer::VisitCodeEntry(Address entry_address) {
  int skip = OutputRawData(entry_address, kCanReturnSkipInsteadOfSkipping);
  Code* object = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}
682
// Serializes a cell referenced from code as an inner-pointer reference.
void Serializer::ObjectSerializer::VisitCell(RelocInfo* rinfo) {
  int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
  Cell* object = Cell::cast(rinfo->target_cell());
  serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
  bytes_processed_so_far_ += kPointerSize;
}
689
// Tries to encode the external-string resource pointed to by
// |resource_pointer| as an index into the given natives source cache.
// Returns true (and emits the encoding) on a match, false otherwise.
bool Serializer::ObjectSerializer::SerializeExternalNativeSourceString(
    int builtin_count,
    v8::String::ExternalOneByteStringResource** resource_pointer,
    FixedArray* source_cache, int resource_index) {
  Isolate* isolate = serializer_->isolate();
  for (int i = 0; i < builtin_count; i++) {
    Object* source = source_cache->get(i);
    // Cache slots are undefined until the corresponding native is loaded.
    if (!source->IsUndefined(isolate)) {
      ExternalOneByteString* string = ExternalOneByteString::cast(source);
      typedef v8::String::ExternalOneByteStringResource Resource;
      const Resource* resource = string->resource();
      if (resource == *resource_pointer) {
        // Encode as (cache kind, builtin index) so the deserializer can
        // re-resolve the resource from its own natives cache.
        sink_->Put(resource_index, "NativesStringResource");
        sink_->PutSection(i, "NativesStringResourceEnd");
        bytes_processed_so_far_ += sizeof(resource);
        return true;
      }
    }
  }
  return false;
}
711
// Serializes the resource pointer of a native source code string by
// matching it against the natives and extra-natives source caches. Only
// native source strings are expected here (see Serialize()).
void Serializer::ObjectSerializer::VisitExternalOneByteString(
    v8::String::ExternalOneByteStringResource** resource_pointer) {
  DCHECK_EQ(serializer_->isolate()->heap()->native_source_string_map(),
            object_->map());
  DCHECK(ExternalOneByteString::cast(object_)->is_short());
  // Flush raw bytes up to the resource field before encoding it.
  Address references_start = reinterpret_cast<Address>(resource_pointer);
  OutputRawData(references_start);
  if (SerializeExternalNativeSourceString(
          Natives::GetBuiltinsCount(), resource_pointer,
          Natives::GetSourceCache(serializer_->isolate()->heap()),
          kNativesStringResource)) {
    return;
  }
  if (SerializeExternalNativeSourceString(
          ExtraNatives::GetBuiltinsCount(), resource_pointer,
          ExtraNatives::GetSourceCache(serializer_->isolate()->heap()),
          kExtraNativesStringResource)) {
    return;
  }
  // One of the strings in the natives cache should match the resource. We
  // don't expect any other kinds of external strings here.
  UNREACHABLE();
}
735
// Prepares the code object for raw output and returns the address whose
// bytes should be emitted. With --predictable, a scratch copy is made and
// all pointer-bearing fields are wiped so snapshots are reproducible.
Address Serializer::ObjectSerializer::PrepareCode() {
  Code* code = Code::cast(object_);
  if (FLAG_predictable) {
    // To make snapshots reproducible, we make a copy of the code object
    // and wipe all pointers in the copy, which we then serialize.
    code = serializer_->CopyCode(code);
    int mode_mask = RelocInfo::kCodeTargetMask |
                    RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
                    RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
                    RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY) |
                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
                    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
    for (RelocIterator it(code, mode_mask); !it.done(); it.next()) {
      RelocInfo* rinfo = it.rinfo();
      rinfo->WipeOut();
    }
    // We need to wipe out the header fields *after* wiping out the
    // relocations, because some of these fields are needed for the latter.
    code->WipeOutHeader();
  }
  // Code age headers are not serializable.
  code->MakeYoung(serializer_->isolate());
  return code->address();
}
760
// Emits the untagged bytes of the current object from the last processed
// position up to |up_to|. For code objects the whole body is emitted at
// once on first call (and fixed up later). Returns the remaining skip
// distance when |return_skip| allows it; otherwise emits a kSkip and
// returns 0.
int Serializer::ObjectSerializer::OutputRawData(
    Address up_to, Serializer::ObjectSerializer::ReturnSkip return_skip) {
  Address object_start = object_->address();
  int base = bytes_processed_so_far_;
  int up_to_offset = static_cast<int>(up_to - object_start);
  int to_skip = up_to_offset - bytes_processed_so_far_;
  int bytes_to_output = to_skip;
  bytes_processed_so_far_ += to_skip;
  // This assert will fail if the reloc info gives us the target_address_address
  // locations in a non-ascending order. Luckily that doesn't happen.
  DCHECK(to_skip >= 0);
  bool outputting_code = false;
  bool is_code_object = object_->IsCode();
  if (to_skip != 0 && is_code_object && !code_has_been_output_) {
    // Output the code all at once and fix later.
    bytes_to_output = object_->Size() + to_skip - bytes_processed_so_far_;
    outputting_code = true;
    code_has_been_output_ = true;
  }
  if (bytes_to_output != 0 && (!is_code_object || outputting_code)) {
    // Prefer the compact single-byte header when the byte count is a small
    // whole number of words and no separate skip is needed.
    if (!outputting_code && bytes_to_output == to_skip &&
        IsAligned(bytes_to_output, kPointerAlignment) &&
        bytes_to_output <= kNumberOfFixedRawData * kPointerSize) {
      int size_in_words = bytes_to_output >> kPointerSizeLog2;
      sink_->PutSection(kFixedRawDataStart + size_in_words, "FixedRawData");
      to_skip = 0;  // This instruction includes skip.
    } else {
      // We always end up here if we are outputting the code of a code object.
      sink_->Put(kVariableRawData, "VariableRawData");
      sink_->PutInt(bytes_to_output, "length");
    }

    // Code objects may be copied and wiped for reproducibility first.
    if (is_code_object) object_start = PrepareCode();

    const char* description = is_code_object ? "Code" : "Byte";
    sink_->PutRaw(object_start + base, bytes_to_output, description);
  }
  if (to_skip != 0 && return_skip == kIgnoringReturn) {
    sink_->Put(kSkip, "Skip");
    sink_->PutInt(to_skip, "SkipDistance");
    to_skip = 0;
  }
  return to_skip;
}
805
806 } // namespace internal
807 } // namespace v8
808