// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SNAPSHOT_SERIALIZE_H_
#define V8_SNAPSHOT_SERIALIZE_H_

#include "src/address-map.h"
#include "src/heap/heap.h"
#include "src/objects.h"
#include "src/snapshot/snapshot-source-sink.h"

namespace v8 {
namespace internal {

class Isolate;
class ScriptData;

static const int kDeoptTableSerializeEntryCount = 64;

// ExternalReferenceTable is a helper class that defines the relationship
// between external references and their encodings. It is used to build
// hashmaps in ExternalReferenceEncoder and ExternalReferenceDecoder.
class ExternalReferenceTable {
 public:
  static ExternalReferenceTable* instance(Isolate* isolate);

  int size() const { return refs_.length(); }
  Address address(int i) { return refs_[i].address; }
  const char* name(int i) { return refs_[i].name; }

  inline static Address NotAvailable() { return NULL; }

 private:
  struct ExternalReferenceEntry {
    Address address;
    const char* name;
  };

  explicit ExternalReferenceTable(Isolate* isolate);

  void Add(Address address, const char* name) {
    ExternalReferenceEntry entry = {address, name};
    refs_.Add(entry);
  }

  List<ExternalReferenceEntry> refs_;

  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceTable);
};
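
// Illustrative sketch (not part of this header): given the accessors above,
// resolving the printable name of an external reference address could be
// written as a linear scan over the table, e.g.
//
//   ExternalReferenceTable* table = ExternalReferenceTable::instance(isolate);
//   for (int i = 0; i < table->size(); i++) {
//     if (table->address(i) == addr) return table->name(i);
//   }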


class ExternalReferenceEncoder {
 public:
  explicit ExternalReferenceEncoder(Isolate* isolate);

  uint32_t Encode(Address key) const;

  const char* NameOfAddress(Isolate* isolate, Address address) const;

 private:
  static uint32_t Hash(Address key) {
    return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key) >>
                                 kPointerSizeLog2);
  }

  HashMap* map_;

  DISALLOW_COPY_AND_ASSIGN(ExternalReferenceEncoder);
};


class PartialCacheIndexMap : public AddressMapBase {
 public:
  PartialCacheIndexMap() : map_(HashMap::PointersMatch) {}

  static const int kInvalidIndex = -1;

  // Look up the object in the map. Return its index if found, or create
  // a new entry with new_index as value and return kInvalidIndex.
  int LookupOrInsert(HeapObject* obj, int new_index) {
    HashMap::Entry* entry = LookupEntry(&map_, obj, false);
    if (entry != NULL) return GetValue(entry);
    SetValue(LookupEntry(&map_, obj, true), static_cast<uint32_t>(new_index));
    return kInvalidIndex;
  }

 private:
  HashMap map_;

  DISALLOW_COPY_AND_ASSIGN(PartialCacheIndexMap);
};
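
// Illustrative usage (sketch, not part of this header; variable names are
// made up):
//
//   int index = map.LookupOrInsert(obj, next_free_index);
//   if (index == PartialCacheIndexMap::kInvalidIndex) {
//     // obj was inserted under next_free_index; serialize it into the cache.
//   } else {
//     // obj is already cached at `index`; emit a reference to that slot.
//   }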


class HotObjectsList {
 public:
  HotObjectsList() : index_(0) {
    for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
  }

  void Add(HeapObject* object) {
    circular_queue_[index_] = object;
    index_ = (index_ + 1) & kSizeMask;
  }

  HeapObject* Get(int index) {
    DCHECK_NOT_NULL(circular_queue_[index]);
    return circular_queue_[index];
  }

  static const int kNotFound = -1;

  int Find(HeapObject* object) {
    for (int i = 0; i < kSize; i++) {
      if (circular_queue_[i] == object) return i;
    }
    return kNotFound;
  }

  static const int kSize = 8;

 private:
  STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
  static const int kSizeMask = kSize - 1;
  HeapObject* circular_queue_[kSize];
  int index_;

  DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
};
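
// Illustrative use by a serializer (sketch, not part of this header): a hit in
// the hot-object queue lets the serializer emit a one-byte kHotObject code
// instead of a full back reference, e.g.
//
//   int index = hot_objects_.Find(obj);
//   if (index != HotObjectsList::kNotFound) {
//     // emit kHotObject + index (see the byte codes defined below)
//   }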


// SerializerDeserializer is the common superclass of Serializer and
// Deserializer; it holds the constants and methods shared by both.
class SerializerDeserializer: public ObjectVisitor {
 public:
  static void Iterate(Isolate* isolate, ObjectVisitor* visitor);

  // No reservation for large object space necessary.
  static const int kNumberOfPreallocatedSpaces = LAST_PAGED_SPACE + 1;
  static const int kNumberOfSpaces = LAST_SPACE + 1;

 protected:
  static bool CanBeDeferred(HeapObject* o);

  // ---------- byte code range 0x00..0x7f ----------
  // Byte codes in this range represent Where, HowToCode and WhereToPoint.
  // Where the pointed-to object can be found:
  // The static assert below will trigger when the number of preallocated
  // spaces changes. If that happens, update the bytecode ranges in the
  // comments below.
  STATIC_ASSERT(5 == kNumberOfSpaces);
  enum Where {
    // 0x00..0x04  Allocate new object, in specified space.
    kNewObject = 0,
    // 0x05        Unused (including 0x25, 0x45, 0x65).
    // 0x06        Unused (including 0x26, 0x46, 0x66).
    // 0x07        Unused (including 0x27, 0x47, 0x67).
    // 0x08..0x0c  Reference to previous object from space.
    kBackref = 0x08,
    // 0x0d        Unused (including 0x2d, 0x4d, 0x6d).
    // 0x0e        Unused (including 0x2e, 0x4e, 0x6e).
    // 0x0f        Unused (including 0x2f, 0x4f, 0x6f).
    // 0x10..0x14  Reference to previous object from space after skip.
    kBackrefWithSkip = 0x10,
    // 0x15        Unused (including 0x35, 0x55, 0x75).
    // 0x16        Unused (including 0x36, 0x56, 0x76).
    // 0x17        Misc (including 0x37, 0x57, 0x77).
    // 0x18        Root array item.
    kRootArray = 0x18,
    // 0x19        Object in the partial snapshot cache.
    kPartialSnapshotCache = 0x19,
    // 0x1a        External reference referenced by id.
    kExternalReference = 0x1a,
    // 0x1b        Object provided in the attached list.
    kAttachedReference = 0x1b,
    // 0x1c        Builtin code referenced by index.
    kBuiltin = 0x1c
    // 0x1d..0x1f  Misc (including 0x3d..0x3f, 0x5d..0x5f, 0x7d..0x7f)
  };

  static const int kWhereMask = 0x1f;
  static const int kSpaceMask = 7;
  STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);

  // How to code the pointer to the object.
  enum HowToCode {
    // Straight pointer.
    kPlain = 0,
    // A pointer inlined in code. What this means depends on the architecture.
    kFromCode = 0x20
  };

  static const int kHowToCodeMask = 0x20;

  // Where to point within the object.
  enum WhereToPoint {
    // Points to start of object
    kStartOfObject = 0,
    // Points to instruction in code object or payload of cell.
    kInnerPointer = 0x40
  };

  static const int kWhereToPointMask = 0x40;

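  // Illustrative decomposition (sketch, not part of this header; the variable
  // name is made up): a byte code in the range 0x00..0x7f combines the three
  // dimensions above, e.g.
  //
  //   int where          = data & kWhereMask;        // e.g. kNewObject + space
  //   int how_to_code    = data & kHowToCodeMask;    // kPlain or kFromCode
  //   int where_to_point = data & kWhereToPointMask; // kStartOfObject or
  //                                                  // kInnerPointer
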
  // ---------- Misc ----------
  // Skip.
  static const int kSkip = 0x1d;
  // Internal reference encoded as offsets of pc and target from code entry.
  static const int kInternalReference = 0x1e;
  static const int kInternalReferenceEncoded = 0x1f;
  // Do nothing, used for padding.
  static const int kNop = 0x3d;
  // Move to next reserved chunk.
  static const int kNextChunk = 0x3e;
  // Deferring object content.
  static const int kDeferred = 0x3f;
  // Used for the source code of the natives, which is in the executable, but
  // is referred to from external strings in the snapshot.
  static const int kNativesStringResource = 0x5d;
  // Used for the source code for compiled stubs, which is in the executable,
  // but is referred to from external strings in the snapshot.
  static const int kExtraNativesStringResource = 0x5e;
  // A tag emitted at strategic points in the snapshot to delineate sections.
  // If the deserializer does not find these at the expected moments then it
  // is an indication that the snapshot and the VM do not fit together.
  // Examine the build process for architecture, version or configuration
  // mismatches.
  static const int kSynchronize = 0x17;
  // Repeats of variable length.
  static const int kVariableRepeat = 0x37;
  // Raw data of variable length.
  static const int kVariableRawData = 0x57;
  // Alignment prefixes 0x7d..0x7f
  static const int kAlignmentPrefix = 0x7d;

  // 0x77 unused

  // ---------- byte code range 0x80..0xff ----------
  // First 32 root array items.
  static const int kNumberOfRootArrayConstants = 0x20;
  // 0x80..0x9f
  static const int kRootArrayConstants = 0x80;
  // 0xa0..0xbf
  static const int kRootArrayConstantsWithSkip = 0xa0;
  static const int kRootArrayConstantsMask = 0x1f;

  // 8 hot (recently seen or back-referenced) objects with optional skip.
  static const int kNumberOfHotObjects = 0x08;
  // 0xc0..0xc7
  static const int kHotObject = 0xc0;
  // 0xc8..0xcf
  static const int kHotObjectWithSkip = 0xc8;
  static const int kHotObjectMask = 0x07;

  // 32 common raw data lengths.
  static const int kNumberOfFixedRawData = 0x20;
  // 0xd0..0xef
  static const int kFixedRawData = 0xd0;
  static const int kOnePointerRawData = kFixedRawData;
  static const int kFixedRawDataStart = kFixedRawData - 1;

  // 16 repeat lengths.
  static const int kNumberOfFixedRepeat = 0x10;
  // 0xf0..0xff
  static const int kFixedRepeat = 0xf0;
  static const int kFixedRepeatStart = kFixedRepeat - 1;

  // ---------- special values ----------
  static const int kAnyOldSpace = -1;

  // Sentinel after a new object to indicate that double alignment is needed.
  static const int kDoubleAlignmentSentinel = 0;

  // Used as index for the attached reference representing the source object.
  static const int kSourceObjectReference = 0;

  // Used as index for the attached reference representing the global proxy.
  static const int kGlobalProxyReference = 0;

  // ---------- member variable ----------
  HotObjectsList hot_objects_;
};


class SerializedData {
 public:
  class Reservation {
   public:
    explicit Reservation(uint32_t size)
        : reservation_(ChunkSizeBits::encode(size)) {}

    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
    bool is_last() const { return IsLastChunkBits::decode(reservation_); }

    void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }

   private:
    uint32_t reservation_;
  };
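
  // A Reservation packs a 31-bit chunk size and a one-bit "last chunk in this
  // space" flag into a single uint32_t (see ChunkSizeBits and IsLastChunkBits
  // below). Illustrative use (sketch, not part of this header):
  //
  //   Reservation r(chunk_size);
  //   if (is_last_chunk_of_space) r.mark_as_last();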

  SerializedData(byte* data, int size)
      : data_(data), size_(size), owns_data_(false) {}
  SerializedData() : data_(NULL), size_(0), owns_data_(false) {}

  ~SerializedData() {
    if (owns_data_) DeleteArray<byte>(data_);
  }

  uint32_t GetMagicNumber() const { return GetHeaderValue(kMagicNumberOffset); }

  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
  class IsLastChunkBits : public BitField<bool, 31, 1> {};

  static uint32_t ComputeMagicNumber(ExternalReferenceTable* table) {
    uint32_t external_refs = table->size();
    return 0xC0DE0000 ^ external_refs;
  }

 protected:
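  // Note: header values are copied with memcpy rather than accessed through a
  // typed pointer, presumably so that reads and writes do not assume that
  // data_ + offset is suitably aligned for uint32_t.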
  void SetHeaderValue(int offset, uint32_t value) {
    uint32_t* address = reinterpret_cast<uint32_t*>(data_ + offset);
    memcpy(reinterpret_cast<uint32_t*>(address), &value, sizeof(value));
  }

  uint32_t GetHeaderValue(int offset) const {
    uint32_t value;
    memcpy(&value, reinterpret_cast<int*>(data_ + offset), sizeof(value));
    return value;
  }

  void AllocateData(int size);

  static uint32_t ComputeMagicNumber(Isolate* isolate) {
    return ComputeMagicNumber(ExternalReferenceTable::instance(isolate));
  }

  void SetMagicNumber(Isolate* isolate) {
    SetHeaderValue(kMagicNumberOffset, ComputeMagicNumber(isolate));
  }

  static const int kMagicNumberOffset = 0;

  byte* data_;
  int size_;
  bool owns_data_;
};


// A Deserializer reads a snapshot and reconstructs the Object graph it defines.
class Deserializer: public SerializerDeserializer {
 public:
  // Create a deserializer from a snapshot byte source.
  template <class Data>
  explicit Deserializer(Data* data)
      : isolate_(NULL),
        source_(data->Payload()),
        magic_number_(data->GetMagicNumber()),
        external_reference_table_(NULL),
        deserialized_large_objects_(0),
        deserializing_user_code_(false),
        next_alignment_(kWordAligned) {
    DecodeReservation(data->Reservations());
  }

  ~Deserializer() override;

  // Deserialize the snapshot into an empty heap.
  void Deserialize(Isolate* isolate);

  // Deserialize a single object and the objects reachable from it.
  MaybeHandle<Object> DeserializePartial(Isolate* isolate,
                                         Handle<JSGlobalProxy> global_proxy);

  // Deserialize a shared function info. Fail gracefully.
  MaybeHandle<SharedFunctionInfo> DeserializeCode(Isolate* isolate);

  // Pass a vector of externally-provided objects referenced by the snapshot.
  // Ownership of its backing store is handed over as well.
  void SetAttachedObjects(Vector<Handle<Object> > attached_objects) {
    attached_objects_ = attached_objects;
  }

 private:
  void VisitPointers(Object** start, Object** end) override;

  void VisitRuntimeEntry(RelocInfo* rinfo) override { UNREACHABLE(); }

  void Initialize(Isolate* isolate);

  bool deserializing_user_code() { return deserializing_user_code_; }

  void DecodeReservation(Vector<const SerializedData::Reservation> res);

  bool ReserveSpace();

  void UnalignedCopy(Object** dest, Object** src) {
    memcpy(dest, src, sizeof(*src));
  }

  void SetAlignment(byte data) {
    DCHECK_EQ(kWordAligned, next_alignment_);
    int alignment = data - (kAlignmentPrefix - 1);
    DCHECK_LE(kWordAligned, alignment);
    DCHECK_LE(alignment, kSimd128Unaligned);
    next_alignment_ = static_cast<AllocationAlignment>(alignment);
  }

  void DeserializeDeferredObjects();

  void FlushICacheForNewIsolate();
  void FlushICacheForNewCodeObjects();

  void CommitPostProcessedObjects(Isolate* isolate);

  // Fills in some heap data in an area from start to end (non-inclusive).  The
  // space id is used for the write barrier.  The object_address is the address
  // of the object we are writing into, or NULL if we are not writing into an
  // object, i.e. if we are writing a series of tagged values that are not on
  // the heap. Return false if the object content has been deferred.
  bool ReadData(Object** start, Object** end, int space,
                Address object_address);
  void ReadObject(int space_number, Object** write_back);
  Address Allocate(int space_index, int size);

  // Special handling for serialized code like hooking up internalized strings.
  HeapObject* PostProcessNewObject(HeapObject* obj, int space);

  // This returns the address of an object that has been described in the
  // snapshot by chunk index and offset.
  HeapObject* GetBackReferencedObject(int space);

  Object** CopyInNativesSource(Vector<const char> source_vector,
                               Object** current);

  // Cached current isolate.
  Isolate* isolate_;

  // Objects from the attached object descriptions in the serialized user code.
  Vector<Handle<Object> > attached_objects_;

  SnapshotByteSource source_;
  uint32_t magic_number_;

  // The address of the next object that will be allocated in each space.
  // Each space has a number of chunks reserved by the GC, with each chunk
  // fitting into a page. Deserialized objects are allocated into the
  // current chunk of the target space by bumping up high water mark.
  Heap::Reservation reservations_[kNumberOfSpaces];
  uint32_t current_chunk_[kNumberOfPreallocatedSpaces];
  Address high_water_[kNumberOfPreallocatedSpaces];

  ExternalReferenceTable* external_reference_table_;

  List<HeapObject*> deserialized_large_objects_;
  List<Code*> new_code_objects_;
  List<Handle<String> > new_internalized_strings_;
  List<Handle<Script> > new_scripts_;

  bool deserializing_user_code_;

  AllocationAlignment next_alignment_;

  DISALLOW_COPY_AND_ASSIGN(Deserializer);
};


class CodeAddressMap;

// There can be only one serializer per V8 process.
class Serializer : public SerializerDeserializer {
 public:
  Serializer(Isolate* isolate, SnapshotByteSink* sink);
  ~Serializer() override;
  void VisitPointers(Object** start, Object** end) override;

  void EncodeReservations(List<SerializedData::Reservation>* out) const;

  void SerializeDeferredObjects();

  Isolate* isolate() const { return isolate_; }

  BackReferenceMap* back_reference_map() { return &back_reference_map_; }
  RootIndexMap* root_index_map() { return &root_index_map_; }

#ifdef OBJECT_PRINT
  void CountInstanceType(Map* map, int size);
#endif  // OBJECT_PRINT

 protected:
  class ObjectSerializer;
  class RecursionScope {
   public:
    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
      serializer_->recursion_depth_++;
    }
    ~RecursionScope() { serializer_->recursion_depth_--; }
    bool ExceedsMaximum() {
      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
    }

   private:
    static const int kMaxRecursionDepth = 32;
    Serializer* serializer_;
  };

  virtual void SerializeObject(HeapObject* o, HowToCode how_to_code,
                               WhereToPoint where_to_point, int skip) = 0;

  void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
               int skip);

  void PutBackReference(HeapObject* object, BackReference reference);

  // Emit alignment prefix if necessary, return required padding space in bytes.
  int PutAlignmentPrefix(HeapObject* object);

  // Returns true if the object was successfully serialized.
  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
                            WhereToPoint where_to_point, int skip);

  inline void FlushSkip(int skip) {
    if (skip != 0) {
      sink_->Put(kSkip, "SkipFromSerializeObject");
      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
    }
  }

  bool BackReferenceIsAlreadyAllocated(BackReference back_reference);

  // This will return the space for an object.
  BackReference AllocateLargeObject(int size);
  BackReference Allocate(AllocationSpace space, int size);
  int EncodeExternalReference(Address addr) {
    return external_reference_encoder_.Encode(addr);
  }

  // GetInt reads 4 bytes at once, requiring padding at the end.
  void Pad();

  // Some roots should not be serialized, because their actual value depends on
  // absolute addresses and they are reset after deserialization, anyway.
  bool ShouldBeSkipped(Object** current);

  // We may not need the code address map for logging for every instance
  // of the serializer.  Initialize it on demand.
  void InitializeCodeAddressMap();

  Code* CopyCode(Code* code);

  inline uint32_t max_chunk_size(int space) const {
    DCHECK_LE(0, space);
    DCHECK_LT(space, kNumberOfSpaces);
    return max_chunk_size_[space];
  }

  SnapshotByteSink* sink() const { return sink_; }

  void QueueDeferredObject(HeapObject* obj) {
    DCHECK(back_reference_map_.Lookup(obj).is_valid());
    deferred_objects_.Add(obj);
  }

  void OutputStatistics(const char* name);

  Isolate* isolate_;

  SnapshotByteSink* sink_;
  ExternalReferenceEncoder external_reference_encoder_;

  BackReferenceMap back_reference_map_;
  RootIndexMap root_index_map_;

  int recursion_depth_;

  friend class Deserializer;
  friend class ObjectSerializer;
  friend class RecursionScope;
  friend class SnapshotData;

 private:
  CodeAddressMap* code_address_map_;
  // Objects from the same space are put into chunks for bulk allocation
  // when deserializing. We have to make sure that each chunk fits into a
  // page. So we track the chunk size in pending_chunk_ of each space; when
  // it exceeds a page, we complete the current chunk and start a new one.
  uint32_t pending_chunk_[kNumberOfPreallocatedSpaces];
  List<uint32_t> completed_chunks_[kNumberOfPreallocatedSpaces];
  uint32_t max_chunk_size_[kNumberOfPreallocatedSpaces];

  // We map serialized large objects to indexes for back-referencing.
  uint32_t large_objects_total_size_;
  uint32_t seen_large_objects_index_;

  List<byte> code_buffer_;

  // To handle stack overflow.
  List<HeapObject*> deferred_objects_;

#ifdef OBJECT_PRINT
  static const int kInstanceTypes = 256;
  int* instance_type_count_;
  size_t* instance_type_size_;
#endif  // OBJECT_PRINT

  DISALLOW_COPY_AND_ASSIGN(Serializer);
};


class PartialSerializer : public Serializer {
 public:
  PartialSerializer(Isolate* isolate, Serializer* startup_snapshot_serializer,
                    SnapshotByteSink* sink)
      : Serializer(isolate, sink),
        startup_serializer_(startup_snapshot_serializer),
        global_object_(NULL) {
    InitializeCodeAddressMap();
  }

  ~PartialSerializer() override { OutputStatistics("PartialSerializer"); }

  // Serialize the objects reachable from a single object pointer.
  void Serialize(Object** o);
  void SerializeObject(HeapObject* o, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override;

 private:
  int PartialSnapshotCacheIndex(HeapObject* o);
  bool ShouldBeInThePartialSnapshotCache(HeapObject* o);

  Serializer* startup_serializer_;
  Object* global_object_;
  PartialCacheIndexMap partial_cache_index_map_;
  DISALLOW_COPY_AND_ASSIGN(PartialSerializer);
};


class StartupSerializer : public Serializer {
 public:
  StartupSerializer(Isolate* isolate, SnapshotByteSink* sink);
  ~StartupSerializer() override { OutputStatistics("StartupSerializer"); }

  // The StartupSerializer has to serialize the root array, which is slightly
  // different.
  void VisitPointers(Object** start, Object** end) override;

  // Serialize the current state of the heap.  The order is:
  // 1) Strong references.
  // 2) Partial snapshot cache.
  // 3) Weak references (e.g. the string table).
  virtual void SerializeStrongReferences();
  void SerializeObject(HeapObject* o, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override;
  void SerializeWeakReferencesAndDeferred();

 private:
  intptr_t root_index_wave_front_;
  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
};


class CodeSerializer : public Serializer {
 public:
  static ScriptData* Serialize(Isolate* isolate,
                               Handle<SharedFunctionInfo> info,
                               Handle<String> source);

  MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
      Isolate* isolate, ScriptData* cached_data, Handle<String> source);

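  // Illustrative round trip through the two entry points above (sketch, not
  // part of this header):
  //
  //   ScriptData* cached = CodeSerializer::Serialize(isolate, info, source);
  //   MaybeHandle<SharedFunctionInfo> copy =
  //       CodeSerializer::Deserialize(isolate, cached, source);
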
  static const int kSourceObjectIndex = 0;
  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);

  static const int kCodeStubsBaseIndex = 1;

  String* source() const {
    DCHECK(!AllowHeapAllocation::IsAllowed());
    return source_;
  }

  const List<uint32_t>* stub_keys() const { return &stub_keys_; }

 private:
  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
      : Serializer(isolate, sink), source_(source) {
    back_reference_map_.AddSourceString(source);
  }

  ~CodeSerializer() override { OutputStatistics("CodeSerializer"); }

  void SerializeObject(HeapObject* o, HowToCode how_to_code,
                       WhereToPoint where_to_point, int skip) override;

  void SerializeBuiltin(int builtin_index, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  void SerializeIC(Code* ic, HowToCode how_to_code,
                   WhereToPoint where_to_point);
  void SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
                         WhereToPoint where_to_point);
  void SerializeGeneric(HeapObject* heap_object, HowToCode how_to_code,
                        WhereToPoint where_to_point);
  int AddCodeStubKey(uint32_t stub_key);

  DisallowHeapAllocation no_gc_;
  String* source_;
  List<uint32_t> stub_keys_;
  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
};


// Wrapper around reservation sizes and the serialization payload.
class SnapshotData : public SerializedData {
 public:
  // Used when producing.
  explicit SnapshotData(const Serializer& ser);

  // Used when consuming.
  explicit SnapshotData(const Vector<const byte> snapshot)
      : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
    CHECK(IsSane());
  }

  Vector<const Reservation> Reservations() const;
  Vector<const byte> Payload() const;

  Vector<const byte> RawData() const {
    return Vector<const byte>(data_, size_);
  }

 private:
  bool IsSane();

  // The data header consists of uint32_t-sized entries:
  // [0] magic number and external reference count
  // [1] version hash
  // [2] number of reservation size entries
  // [3] payload length
  // ... reservations
  // ... serialized payload
  static const int kCheckSumOffset = kMagicNumberOffset + kInt32Size;
  static const int kNumReservationsOffset = kCheckSumOffset + kInt32Size;
  static const int kPayloadLengthOffset = kNumReservationsOffset + kInt32Size;
  static const int kHeaderSize = kPayloadLengthOffset + kInt32Size;
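  // With kInt32Size == 4, the four header entries above occupy bytes 0..15 and
  // kHeaderSize is 16; the reservation data starts at that offset, followed by
  // the serialized payload.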
};


// Wrapper around ScriptData to provide code-serializer-specific functionality.
class SerializedCodeData : public SerializedData {
 public:
  // Used when consuming.
  static SerializedCodeData* FromCachedData(Isolate* isolate,
                                            ScriptData* cached_data,
                                            String* source);

  // Used when producing.
  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);

  // Return ScriptData object and relinquish ownership over it to the caller.
  ScriptData* GetScriptData();

  Vector<const Reservation> Reservations() const;
  Vector<const byte> Payload() const;

  Vector<const uint32_t> CodeStubKeys() const;

 private:
  explicit SerializedCodeData(ScriptData* data);

  enum SanityCheckResult {
    CHECK_SUCCESS = 0,
    MAGIC_NUMBER_MISMATCH = 1,
    VERSION_MISMATCH = 2,
    SOURCE_MISMATCH = 3,
    CPU_FEATURES_MISMATCH = 4,
    FLAGS_MISMATCH = 5,
    CHECKSUM_MISMATCH = 6
  };

  SanityCheckResult SanityCheck(Isolate* isolate, String* source) const;

  uint32_t SourceHash(String* source) const;

  // The data header consists of uint32_t-sized entries:
  // [0] magic number and external reference count
  // [1] version hash
  // [2] source hash
  // [3] cpu features
  // [4] flag hash
  // [5] number of reservation size entries
  // [6] number of code stub keys
  // [7] payload length
  // [8] payload checksum part 1
  // [9] payload checksum part 2
  // ...  reservations
  // ...  code stub keys
  // ...  serialized payload
  static const int kVersionHashOffset = kMagicNumberOffset + kInt32Size;
  static const int kSourceHashOffset = kVersionHashOffset + kInt32Size;
  static const int kCpuFeaturesOffset = kSourceHashOffset + kInt32Size;
  static const int kFlagHashOffset = kCpuFeaturesOffset + kInt32Size;
  static const int kNumReservationsOffset = kFlagHashOffset + kInt32Size;
  static const int kNumCodeStubKeysOffset = kNumReservationsOffset + kInt32Size;
  static const int kPayloadLengthOffset = kNumCodeStubKeysOffset + kInt32Size;
  static const int kChecksum1Offset = kPayloadLengthOffset + kInt32Size;
  static const int kChecksum2Offset = kChecksum1Offset + kInt32Size;
  static const int kHeaderSize = kChecksum2Offset + kInt32Size;
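  // With kInt32Size == 4, the ten header entries above occupy bytes 0..39 and
  // kHeaderSize is 40; reservations, code stub keys and the serialized payload
  // follow in that order.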
};
}  // namespace internal
}  // namespace v8

#endif  // V8_SNAPSHOT_SERIALIZE_H_