// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CODE_RANGE_H_
#define V8_HEAP_CODE_RANGE_H_

#include <atomic>
#include <memory>
#include <unordered_map>
#include <vector>

#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"

namespace v8 {
namespace internal {

// The process-wide singleton that keeps track of code range regions with the
// intention to reuse free code range regions as a workaround for CFG memory
// leaks (see crbug.com/870054).
class CodeRangeAddressHint {
 public:
  // When near code range is enabled, an address within
  // kMaxPCRelativeCodeRangeInMB of the embedded blob is returned if
  // there is enough space. Otherwise a random address is returned.
  // When near code range is disabled, returns the most recently freed code
  // range start address for the given size. If there is no such entry, then a
  // random address is returned.
  V8_EXPORT_PRIVATE Address GetAddressHint(size_t code_range_size,
                                           size_t alignment);

  V8_EXPORT_PRIVATE void NotifyFreedCodeRange(Address code_range_start,
                                              size_t code_range_size);

 private:
  base::Mutex mutex_;
  // A map from code range size to an array of recently freed code range
  // addresses. There should be O(1) different code range sizes.
  // The length of each array is limited by the peak number of code ranges,
  // which should also be O(1).
  std::unordered_map<size_t, std::vector<Address>> recently_freed_;
};
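
// Illustrative usage sketch (hypothetical caller, assuming a process-wide
// |hint| instance as described above):
//
//   Address start = hint.GetAddressHint(code_range_size, alignment);
//   // ... attempt to reserve |code_range_size| bytes at or near |start| ...
//   // When the reservation is later released:
//   hint.NotifyFreedCodeRange(start, code_range_size);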

// A code range is a virtual memory cage that may contain executable code. It
// has the following layout.
//
// +------------+-----+----------------  ~~~  -+
// |     RW     | ... |    ...                 |
// +------------+-----+----------------- ~~~  -+
// ^            ^     ^
// start        base  allocatable base
//
// <------------>     <------------------------>
//   reserved            allocatable region
// <------------------------------------------->
//               code region
//
// The start of the reservation may include a reserved page with read-write
// access as required by some platforms (Win64). The cage's page allocator
// does not control the optional reserved page at the beginning of the code
// region.
//
// The following conditions hold:
// 1) |reservation()->region()| >= |optional RW pages| +
//    |reservation()->page_allocator()|
// 2) |reservation()| is AllocatePageSize()-aligned
// 3) |reservation()->page_allocator()| (i.e. allocatable base) is
//    MemoryChunk::kAlignment-aligned
// 4) |base()| is CommitPageSize()-aligned
class CodeRange final : public VirtualMemoryCage {
 public:
  V8_EXPORT_PRIVATE ~CodeRange() override;

  // Returns the size of the initial area of a code-range, which is marked
  // writable and reserved to contain unwind information.
  static size_t GetWritableReservedAreaSize();

  uint8_t* embedded_blob_code_copy() const {
    // remap_embedded_builtins_mutex_ is designed to protect write contention to
    // embedded_blob_code_copy_. It is safe to read without taking the
    // mutex. It is read to check if short builtins ought to be enabled because
    // a shared CodeRange has already remapped builtins and to find where the
    // instruction stream for a builtin is.
    //
    // For the first, racing with an Isolate calling RemapEmbeddedBuiltins
    // may result in disabling short builtins, which is not a correctness issue.
    //
    // For the second, racing with an Isolate calling RemapEmbeddedBuiltins
    // may result in an already running Isolate that did not have short builtins
    // enabled (due to max old generation size) switching over to using remapped
    // builtins, which is also not a correctness issue as the remapped builtins
    // are byte-equivalent.
    //
    // Both these scenarios should be rare. The initial Isolate is usually
    // created by itself, i.e. without contention. Additionally, the first
    // Isolate usually remaps builtins on machines with enough memory, not
    // subsequent Isolates in the same process.
    return embedded_blob_code_copy_.load(std::memory_order_acquire);
  }
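
  // Illustrative sketch (hypothetical caller): checking whether a shared
  // CodeRange has already remapped the builtins, per the comment above:
  //
  //   if (uint8_t* copy = code_range->embedded_blob_code_copy()) {
  //     // Builtins were remapped; |copy| is the base of the copied
  //     // instruction streams.
  //   }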

#ifdef V8_OS_WIN64
  // 64-bit Windows needs to track how many Isolates are using the CodeRange for
  // registering and unregistering of unwind info. Note that even though
  // CodeRanges are used with std::shared_ptr, std::shared_ptr::use_count should
  // not be used for synchronization as it's usually implemented with a relaxed
  // read.
  uint32_t AtomicIncrementUnwindInfoUseCount() {
    return unwindinfo_use_count_.fetch_add(1, std::memory_order_acq_rel);
  }

  uint32_t AtomicDecrementUnwindInfoUseCount() {
    return unwindinfo_use_count_.fetch_sub(1, std::memory_order_acq_rel);
  }
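
  // Illustrative sketch (hypothetical Win64 caller): the fetch_add/fetch_sub
  // above return the previous count, so the first registration and the last
  // unregistration can be detected:
  //
  //   if (code_range->AtomicIncrementUnwindInfoUseCount() == 0) {
  //     // First user: register unwind info with the OS.
  //   }
  //   ...
  //   if (code_range->AtomicDecrementUnwindInfoUseCount() == 1) {
  //     // Last user: unregister unwind info.
  //   }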
#endif  // V8_OS_WIN64

  bool InitReservation(v8::PageAllocator* page_allocator, size_t requested);

  void Free();

  // Remap and copy the embedded builtins into this CodeRange. This method is
  // idempotent and only performs the copy once, so it can be used uniformly
  // regardless of whether the pointer cage is per-Isolate or shared. Returns
  // the address of the copy.
  //
  // The builtins code region will be freed with the code range at tear down.
  //
  // When ENABLE_SLOW_DCHECKS is on, the contents of |embedded_blob_code| are
  // compared against the already copied version.
  uint8_t* RemapEmbeddedBuiltins(Isolate* isolate,
                                 const uint8_t* embedded_blob_code,
                                 size_t embedded_blob_code_size);
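
  // Illustrative sketch (hypothetical caller during Isolate initialization;
  // the EmbeddedData accessors are assumed from V8's embedded-data.h):
  //
  //   EmbeddedData d = EmbeddedData::FromBlob();
  //   uint8_t* copy =
  //       code_range->RemapEmbeddedBuiltins(isolate, d.code(), d.code_size());
  //   // Repeated calls return the same |copy| without copying again.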

  static std::shared_ptr<CodeRange> EnsureProcessWideCodeRange(
      v8::PageAllocator* page_allocator, size_t requested_size);

  // If EnsureProcessWideCodeRange has been called, returns the initialized
  // CodeRange. Otherwise returns an empty std::shared_ptr.
  V8_EXPORT_PRIVATE static std::shared_ptr<CodeRange> GetProcessWideCodeRange();
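
  // Illustrative sketch (hypothetical callers; GetPlatformPageAllocator() is
  // declared in src/utils/allocation.h, included above):
  //
  //   std::shared_ptr<CodeRange> range = CodeRange::EnsureProcessWideCodeRange(
  //       GetPlatformPageAllocator(), requested_size);
  //   ...
  //   if (std::shared_ptr<CodeRange> r = CodeRange::GetProcessWideCodeRange()) {
  //     // The process-wide CodeRange has been initialized.
  //   }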

 private:
  // Used when short builtin calls are enabled, where embedded builtins are
  // copied into the CodeRange so calls can be nearer.
  std::atomic<uint8_t*> embedded_blob_code_copy_{nullptr};

  // When sharing a CodeRange among Isolates, calls to RemapEmbeddedBuiltins may
  // race during Isolate::Init.
  base::Mutex remap_embedded_builtins_mutex_;

#ifdef V8_OS_WIN64
  std::atomic<uint32_t> unwindinfo_use_count_{0};
#endif
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_CODE_RANGE_H_