// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SNAPSHOT_SERIALIZER_H_
#define V8_SNAPSHOT_SERIALIZER_H_

#include "src/codegen/external-reference-encoder.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
#include "src/logging/log.h"
#include "src/objects/objects.h"
#include "src/snapshot/embedded/embedded-data.h"
#include "src/snapshot/serializer-deserializer.h"
#include "src/snapshot/snapshot-source-sink.h"
#include "src/snapshot/snapshot.h"
#include "src/utils/identity-map.h"

namespace v8 {
namespace internal {

class CodeAddressMap : public CodeEventLogger {
 public:
  explicit CodeAddressMap(Isolate* isolate) : CodeEventLogger(isolate) {
    isolate->logger()->AddCodeEventListener(this);
  }

  ~CodeAddressMap() override {
    isolate_->logger()->RemoveCodeEventListener(this);
  }

  void CodeMoveEvent(AbstractCode from, AbstractCode to) override {
    address_to_name_map_.Move(from.address(), to.address());
  }

  void CodeDisableOptEvent(Handle<AbstractCode> code,
                           Handle<SharedFunctionInfo> shared) override {}

  const char* Lookup(Address address) {
    return address_to_name_map_.Lookup(address);
  }

 private:
  class NameMap {
   public:
    NameMap() : impl_() {}
    NameMap(const NameMap&) = delete;
    NameMap& operator=(const NameMap&) = delete;

    ~NameMap() {
      for (base::HashMap::Entry* p = impl_.Start(); p != nullptr;
           p = impl_.Next(p)) {
        DeleteArray(static_cast<const char*>(p->value));
      }
    }

    void Insert(Address code_address, const char* name, int name_size) {
      base::HashMap::Entry* entry = FindOrCreateEntry(code_address);
      if (entry->value == nullptr) {
        entry->value = CopyName(name, name_size);
      }
    }

    const char* Lookup(Address code_address) {
      base::HashMap::Entry* entry = FindEntry(code_address);
      return (entry != nullptr) ? static_cast<const char*>(entry->value)
                                : nullptr;
    }

    void Remove(Address code_address) {
      base::HashMap::Entry* entry = FindEntry(code_address);
      if (entry != nullptr) {
        DeleteArray(static_cast<char*>(entry->value));
        RemoveEntry(entry);
      }
    }

    void Move(Address from, Address to) {
      if (from == to) return;
      base::HashMap::Entry* from_entry = FindEntry(from);
      DCHECK_NOT_NULL(from_entry);
      void* value = from_entry->value;
      RemoveEntry(from_entry);
      base::HashMap::Entry* to_entry = FindOrCreateEntry(to);
      DCHECK_NULL(to_entry->value);
      to_entry->value = value;
    }

   private:
    static char* CopyName(const char* name, int name_size) {
      char* result = NewArray<char>(name_size + 1);
      for (int i = 0; i < name_size; ++i) {
        char c = name[i];
        if (c == '\0') c = ' ';
        result[i] = c;
      }
      result[name_size] = '\0';
      return result;
    }

    base::HashMap::Entry* FindOrCreateEntry(Address code_address) {
      return impl_.LookupOrInsert(reinterpret_cast<void*>(code_address),
                                  ComputeAddressHash(code_address));
    }

    base::HashMap::Entry* FindEntry(Address code_address) {
      return impl_.Lookup(reinterpret_cast<void*>(code_address),
                          ComputeAddressHash(code_address));
    }

    void RemoveEntry(base::HashMap::Entry* entry) {
      impl_.Remove(entry->key, entry->hash);
    }

    base::HashMap impl_;
  };

  void LogRecordedBuffer(Handle<AbstractCode> code,
                         MaybeHandle<SharedFunctionInfo>, const char* name,
                         int length) override {
    address_to_name_map_.Insert(code->address(), name, length);
  }

#if V8_ENABLE_WEBASSEMBLY
  void LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
                         int length) override {
    UNREACHABLE();
  }
#endif  // V8_ENABLE_WEBASSEMBLY

  NameMap address_to_name_map_;
};
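
// Rough usage sketch for CodeAddressMap (illustrative only; the serializer
// presumably creates its instance via InitializeCodeAddressMap(), declared
// further below):
//
//   // Constructing the map subscribes it to the isolate's code events, so
//   // the names of subsequently created code objects are recorded by address
//   // and kept up to date across code moves.
//   auto code_names = std::make_unique<CodeAddressMap>(isolate);
//   ...
//   // Returns nullptr if no name was recorded for this address.
//   const char* name = code_names->Lookup(code.address());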

class ObjectCacheIndexMap {
 public:
  explicit ObjectCacheIndexMap(Heap* heap) : map_(heap), next_index_(0) {}
  ObjectCacheIndexMap(const ObjectCacheIndexMap&) = delete;
  ObjectCacheIndexMap& operator=(const ObjectCacheIndexMap&) = delete;

  // If |obj| is in the map, immediately return true. Otherwise add it to the
  // map and return false. In either case set |*index_out| to the index
  // associated with the object.
  bool LookupOrInsert(HeapObject obj, int* index_out) {
    auto find_result = map_.FindOrInsert(obj);
    if (!find_result.already_exists) {
      *find_result.entry = next_index_++;
    }
    *index_out = *find_result.entry;
    return find_result.already_exists;
  }
  bool LookupOrInsert(Handle<HeapObject> obj, int* index_out) {
    return LookupOrInsert(*obj, index_out);
  }

  bool Lookup(HeapObject obj, int* index_out) const {
    int* index = map_.Find(obj);
    if (index == nullptr) {
      return false;
    }
    *index_out = *index;
    return true;
  }

  Handle<FixedArray> Values(Isolate* isolate);

  int size() const { return next_index_; }

 private:
  IdentityMap<int, base::DefaultAllocationPolicy> map_;
  int next_index_;
};
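
// Illustrative use of the LookupOrInsert() contract documented above
// (hypothetical caller; not part of this header):
//
//   int index;
//   if (!cache.LookupOrInsert(obj, &index)) {
//     // First occurrence: |obj| was just assigned the next free index and
//     // still needs to be serialized.
//   }
//   // Either way, |index| now identifies |obj| within the cache.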

class Serializer : public SerializerDeserializer {
 public:
  Serializer(Isolate* isolate, Snapshot::SerializerFlags flags);
  ~Serializer() override { DCHECK_EQ(unresolved_forward_refs_, 0); }
  Serializer(const Serializer&) = delete;
  Serializer& operator=(const Serializer&) = delete;

  const std::vector<byte>* Payload() const { return sink_.data(); }

  bool ReferenceMapContains(Handle<HeapObject> o) {
    return reference_map()->LookupReference(o) != nullptr;
  }

  Isolate* isolate() const { return isolate_; }

  // The pointer compression cage base value used for decompression of all
  // tagged values except references to Code objects.
  PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
    return cage_base_;
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }

  int TotalAllocationSize() const;

 protected:
  using PendingObjectReferences = std::vector<int>*;

  class ObjectSerializer;
  class V8_NODISCARD RecursionScope {
   public:
    explicit RecursionScope(Serializer* serializer) : serializer_(serializer) {
      serializer_->recursion_depth_++;
    }
    ~RecursionScope() { serializer_->recursion_depth_--; }
    bool ExceedsMaximum() {
      return serializer_->recursion_depth_ >= kMaxRecursionDepth;
    }

   private:
    static const int kMaxRecursionDepth = 32;
    Serializer* serializer_;
  };
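
  // How this RAII depth guard is typically meant to be used (an illustrative
  // sketch only; the actual deferral policy lives in the implementation file):
  //
  //   RecursionScope recursion(this);
  //   if (recursion.ExceedsMaximum()) {
  //     // Too deep: defer the object and reference it indirectly instead of
  //     // recursing further.
  //     QueueDeferredObject(obj);
  //     ...
  //     return;
  //   }
  //   // Otherwise serialize |obj| recursively as usual.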

  // Compares obj with not_mapped_symbol root. When V8_EXTERNAL_CODE_SPACE is
  // enabled it compares full pointers.
  V8_INLINE bool IsNotMappedSymbol(HeapObject obj) const;

  void SerializeDeferredObjects();
  void SerializeObject(Handle<HeapObject> o);
  virtual void SerializeObjectImpl(Handle<HeapObject> o) = 0;

  virtual bool MustBeDeferred(HeapObject object);

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override;
  void SerializeRootObject(FullObjectSlot slot);

  void PutRoot(RootIndex root_index);
  void PutSmiRoot(FullObjectSlot slot);
  void PutBackReference(HeapObject object, SerializerReference reference);
  void PutAttachedReference(SerializerReference reference);
  void PutNextChunk(SnapshotSpace space);
  void PutRepeat(int repeat_count);

  // Emit a marker noting that this slot is a forward reference to an object
  // which has not yet been serialized.
  void PutPendingForwardReference(PendingObjectReferences& ref);
  // Resolve the given previously registered forward reference to the current
  // object.
  void ResolvePendingForwardReference(int obj);
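
  // Sketch of the forward-reference flow implied by the two declarations above
  // (illustrative only; the real call sites are in the implementation file,
  // and this assumes |obj| was already registered as pending):
  //
  //   // While serializing a slot that refers to a pending object |obj|:
  //   PendingObjectReferences& refs =
  //       *forward_refs_per_pending_object_.Find(obj);
  //   PutPendingForwardReference(refs);  // emits a marker, records an id
  //   ...
  //   // Later, once |obj| itself has been serialized and allocated:
  //   for (int id : *refs) ResolvePendingForwardReference(id);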

  // Returns true if the object was successfully serialized as a root.
  bool SerializeRoot(HeapObject obj);

  // Returns true if the object was successfully serialized as hot object.
  bool SerializeHotObject(HeapObject obj);

  // Returns true if the object was successfully serialized as back reference.
  bool SerializeBackReference(HeapObject obj);

  // Returns true if the object was successfully serialized as pending object.
  bool SerializePendingObject(HeapObject obj);

  // Returns true if the given heap object is a bytecode handler code object.
  bool ObjectIsBytecodeHandler(HeapObject obj) const;

  ExternalReferenceEncoder::Value EncodeExternalReference(Address addr);

  Maybe<ExternalReferenceEncoder::Value> TryEncodeExternalReference(
      Address addr) {
    return external_reference_encoder_.TryEncode(addr);
  }

  // GetInt reads 4 bytes at once, requiring padding at the end.
  // Use padding_offset to specify the space you want to use after padding.
  void Pad(int padding_offset = 0);

  // We may not need the code address map for logging for every instance
  // of the serializer. Initialize it on demand.
  void InitializeCodeAddressMap();

  Code CopyCode(Code code);

  void QueueDeferredObject(HeapObject obj) {
    DCHECK_NULL(reference_map_.LookupReference(obj));
    deferred_objects_.Push(obj);
  }

  // Register that the given object shouldn't be immediately serialized, but
  // will be serialized later and any references to it should be pending
  // forward references.
  void RegisterObjectIsPending(HeapObject obj);

  // Resolve the given pending object reference with the current object.
  void ResolvePendingObject(HeapObject obj);

  void OutputStatistics(const char* name);

  void CountAllocation(Map map, int size, SnapshotSpace space);

#ifdef DEBUG
  void PushStack(Handle<HeapObject> o) { stack_.Push(*o); }
  void PopStack();
  void PrintStack();
  void PrintStack(std::ostream&);
#endif  // DEBUG

  SerializerReferenceMap* reference_map() { return &reference_map_; }
  const RootIndexMap* root_index_map() const { return &root_index_map_; }

  SnapshotByteSink sink_;  // Used directly by subclasses.

  bool allow_unknown_external_references_for_testing() const {
    return (flags_ & Snapshot::kAllowUnknownExternalReferencesForTesting) != 0;
  }
  bool allow_active_isolate_for_testing() const {
    return (flags_ & Snapshot::kAllowActiveIsolateForTesting) != 0;
  }

  bool reconstruct_read_only_and_shared_object_caches_for_testing() const {
    return (flags_ &
            Snapshot::kReconstructReadOnlyAndSharedObjectCachesForTesting) != 0;
  }

 private:
  // A circular queue of hot objects. This is added to in the same order as in
  // Deserializer::HotObjectsList, but this stores the objects as an array of
  // raw addresses that are considered strong roots. This allows objects to be
  // added to the list without having to extend their handle's lifetime.
  //
  // We should never allow this class to return Handles to objects in the
  // queue, as the object in the queue may change if kSize other objects are
  // added to the queue during that Handle's lifetime.
  class HotObjectsList {
   public:
    explicit HotObjectsList(Heap* heap);
    ~HotObjectsList();
    HotObjectsList(const HotObjectsList&) = delete;
    HotObjectsList& operator=(const HotObjectsList&) = delete;

    void Add(HeapObject object) {
      circular_queue_[index_] = object.ptr();
      index_ = (index_ + 1) & kSizeMask;
    }

    static const int kNotFound = -1;

    int Find(HeapObject object) {
      DCHECK(!AllowGarbageCollection::IsAllowed());
      for (int i = 0; i < kSize; i++) {
        if (circular_queue_[i] == object.ptr()) {
          return i;
        }
      }
      return kNotFound;
    }

   private:
    static const int kSize = kHotObjectCount;
    static const int kSizeMask = kSize - 1;
    STATIC_ASSERT(base::bits::IsPowerOfTwo(kSize));
    Heap* heap_;
    StrongRootsEntry* strong_roots_entry_;
    Address circular_queue_[kSize] = {kNullAddress};
    int index_ = 0;
  };
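
  // Rough sketch of the ring-buffer behaviour documented above (illustrative;
  // not real call sites):
  //
  //   hot_objects_.Add(obj);              // stored at index_, which wraps
  //   int slot = hot_objects_.Find(obj);  // linear scan; kNotFound if absent
  //   // After kSize further Add() calls the entry is overwritten, so |slot|
  //   // is only meaningful while at most kSize objects have been added.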

  // Disallow GC during serialization.
  // TODO(leszeks, v8:10815): Remove this constraint.
  DISALLOW_GARBAGE_COLLECTION(no_gc_)

  Isolate* isolate_;
#if V8_COMPRESS_POINTERS
  const PtrComprCageBase cage_base_;
#endif  // V8_COMPRESS_POINTERS
  HotObjectsList hot_objects_;
  SerializerReferenceMap reference_map_;
  ExternalReferenceEncoder external_reference_encoder_;
  RootIndexMap root_index_map_;
  std::unique_ptr<CodeAddressMap> code_address_map_;
  std::vector<byte> code_buffer_;
  GlobalHandleVector<HeapObject>
      deferred_objects_;  // To handle stack overflow.
  int num_back_refs_ = 0;

  // Objects which have started being serialized, but haven't yet been
  // allocated with the allocator, are considered "pending". References to
  // them don't have an allocation to backref to, so instead they are
  // registered as pending forward references, which are resolved once the
  // object is allocated.
  //
  // Forward references are registered in a deterministic order, and can
  // therefore be identified by an incrementing integer index, which is
  // effectively an index into a vector of the currently registered forward
  // refs. The references in this vector might not be resolved in order, so we
  // can only clear it (and reset the indices) when there are no unresolved
  // forward refs remaining.
  int next_forward_ref_id_ = 0;
  int unresolved_forward_refs_ = 0;
  IdentityMap<PendingObjectReferences, base::DefaultAllocationPolicy>
      forward_refs_per_pending_object_;

  // Used to keep track of the off-heap backing stores used by TypedArrays/
  // ArrayBuffers. Note that the index begins at 1 and not 0, because when a
  // TypedArray has an on-heap backing store, the backing_store pointer in the
  // corresponding ArrayBuffer will be null, which makes it indistinguishable
  // from index 0.
  uint32_t seen_backing_stores_index_ = 1;

  int recursion_depth_ = 0;
  const Snapshot::SerializerFlags flags_;

  size_t allocation_size_[kNumberOfSnapshotSpaces] = {0};
#ifdef OBJECT_PRINT
  static constexpr int kInstanceTypes = LAST_TYPE + 1;
  std::unique_ptr<int[]> instance_type_count_[kNumberOfSnapshotSpaces];
  std::unique_ptr<size_t[]> instance_type_size_[kNumberOfSnapshotSpaces];
#endif  // OBJECT_PRINT

#ifdef DEBUG
  GlobalHandleVector<HeapObject> back_refs_;
  GlobalHandleVector<HeapObject> stack_;
#endif  // DEBUG
};

class RelocInfoIterator;

class Serializer::ObjectSerializer : public ObjectVisitor {
 public:
  ObjectSerializer(Serializer* serializer, Handle<HeapObject> obj,
                   SnapshotByteSink* sink)
      : isolate_(serializer->isolate()),
        serializer_(serializer),
        object_(obj),
        sink_(sink),
        bytes_processed_so_far_(0) {
#ifdef DEBUG
    serializer_->PushStack(obj);
#endif  // DEBUG
  }
  ~ObjectSerializer() override {
#ifdef DEBUG
    serializer_->PopStack();
#endif  // DEBUG
  }
  void Serialize();
  void SerializeObject();
  void SerializeDeferred();
  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override;
  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override;
  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override;
  void VisitEmbeddedPointer(Code host, RelocInfo* target) override;
  void VisitExternalReference(Foreign host, Address* p) override;
  void VisitExternalReference(Code host, RelocInfo* rinfo) override;
  void VisitExternalPointer(HeapObject host, ExternalPointer_t ptr) override;
  void VisitInternalReference(Code host, RelocInfo* rinfo) override;
  void VisitCodeTarget(Code host, RelocInfo* target) override;
  void VisitRuntimeEntry(Code host, RelocInfo* reloc) override;
  void VisitOffHeapTarget(Code host, RelocInfo* target) override;

  Isolate* isolate() { return isolate_; }

 private:
  class RelocInfoObjectPreSerializer;

  void SerializePrologue(SnapshotSpace space, int size, Map map);

  // This function outputs or skips the raw data from the last pointer up to
  // the current position.
  void SerializeContent(Map map, int size);
  void OutputExternalReference(Address target, int target_size,
                               bool sandboxify, ExternalPointerTag tag);
  void OutputRawData(Address up_to);
  void SerializeCode(Map map, int size);
  uint32_t SerializeBackingStore(void* backing_store, int32_t byte_length,
                                 Maybe<int32_t> max_byte_length);
  void SerializeJSTypedArray();
  void SerializeJSArrayBuffer();
  void SerializeExternalString();
  void SerializeExternalStringAsSequentialString();

  Isolate* isolate_;
  Serializer* serializer_;
  Handle<HeapObject> object_;
  SnapshotByteSink* sink_;
  int bytes_processed_so_far_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SNAPSHOT_SERIALIZER_H_