// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_
#define V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_

#include "include/v8config.h"
#include "src/base/atomicops.h"
#include "src/base/memory.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"

#ifdef V8_SANDBOX_IS_AVAILABLE

namespace v8 {
namespace internal {

class Isolate;

/**
 * A table storing pointers to objects outside the sandbox.
 *
 * An external pointer table provides the basic mechanisms to ensure
 * memory-safe access to objects located outside the sandbox, but referenced
 * from within it. When an external pointer table is used, objects located
 * inside the sandbox reference outside objects through indices into the table.
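 *
 * For illustration only (the names below are made up for this sketch): instead
 * of a raw pointer, an in-sandbox object stores a 32-bit table index, and
 * every access to the external object goes through the table:
 *
 *   uint32_t index = object->foo_index();      // stored inside the sandbox
 *   Address foo = table.Get(index, kFooTag);   // real pointer, outside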
 *
 * Type safety can be ensured by using type-specific tags for the external
 * pointers. These tags will be ORed into the unused top bits of the pointer
 * when storing them and will be ANDed away when loading the pointer later
 * again. If a pointer of the wrong type is accessed, some of the top bits will
 * remain in place, rendering the pointer inaccessible.
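 *
 * Roughly, with a hypothetical kFooTag (a sketch, not the actual code):
 *
 *   table[index] = pointer | kFooTag;    // store: tag ORed into top bits
 *   pointer = table[index] & ~kFooTag;   // load: tag ANDed away again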
 *
 * Temporal memory safety is achieved through garbage collection of the table,
 * which ensures that every entry is either an invalid pointer or a valid
 * pointer pointing to a live object.
 *
 * Spatial memory safety can, if necessary, be ensured by storing the size of a
 * referenced object together with the object itself outside the sandbox, and
 * referencing both through a single entry in the table.
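 *
 * For example, the table entry could point at a small out-of-sandbox wrapper
 * that carries the size (a hypothetical layout, not defined by this class):
 *
 *   struct SizedExternalObject {
 *     size_t size;   // used for bounds checks
 *     Foo object;    // the actual external object
 *   };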
 *
 * The garbage collection algorithm for the table works as follows (a sweep
 * sketch follows the list):
 *  - The top bit of every entry is reserved for the marking bit.
 *  - Every store to an entry automatically sets the marking bit when ORing
 *    with the tag. This avoids the need for write barriers.
 *  - Every load of an entry automatically removes the marking bit when ANDing
 *    with the inverted tag.
 *  - When the GC marking visitor finds a live object with an external pointer,
 *    it marks the corresponding entry as alive through Mark(), which sets the
 *    marking bit using an atomic CAS operation.
 *  - When marking is finished, Sweep() iterates over the table once while the
 *    mutator is stopped and builds a freelist from all dead entries while also
 *    removing the marking bit from any live entry.
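 *
 * In pseudo-code, the sweep phase then looks roughly like this (a sketch,
 * not the actual implementation):
 *
 *   uint32_t new_freelist_head = 0;
 *   for (uint32_t i = capacity_ - 1; i > 0; i--) {
 *     Address entry = load(i);
 *     if (is_marked(entry)) {
 *       store(i, clear_mark_bit(entry));    // live: just clear the mark bit
 *     } else {
 *       store(i, make_freelist_entry(new_freelist_head));  // dead: free it
 *       new_freelist_head = i;
 *     }
 *   }
 *   freelist_head_ = new_freelist_head;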
 *
 * The freelist is a singly-linked list, using the lower 32 bits of each entry
 * to store the index of the next free entry. When the freelist is empty and a
 * new entry is allocated, the table grows in place and the freelist is
 * re-populated from the newly added entries.
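 *
 * Allocating from the freelist then amounts to popping its head (sketch only;
 * the real Allocate() does this with an atomic compare-and-swap):
 *
 *   uint32_t index = freelist_head_;            // 0 would mean: grow first
 *   freelist_head_ = static_cast<uint32_t>(load(index));  // lower 32 bits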
 */
class V8_EXPORT_PRIVATE ExternalPointerTable {
 public:
  // Size of an ExternalPointerTable, for layout computation in IsolateData.
  // Asserted to be equal to the actual size in external-pointer-table.cc.
  static int constexpr kSize = 3 * kSystemPointerSize;

  ExternalPointerTable() = default;

  // Initializes this external pointer table by reserving the backing memory
  // and initializing the freelist.
  inline void Init(Isolate* isolate);

  // Resets this external pointer table and deletes all associated memory.
  inline void TearDown();

  // Retrieves the entry at the given index.
  //
  // This method is atomic and can be called from background threads.
  inline Address Get(uint32_t index, ExternalPointerTag tag) const;

  // Sets the entry at the given index to the given value.
  //
  // This method is atomic and can be called from background threads.
  inline void Set(uint32_t index, Address value, ExternalPointerTag tag);

  // Allocates a new entry in the external pointer table. The caller must
  // initialize the entry afterwards through Set(). In particular, the caller
  // is responsible for setting the mark bit of the new entry.
  // TODO(saelo) this can fail, in which case we should probably do GC + retry.
  //
  // This method is atomic and can be called from background threads.
  inline uint32_t Allocate();
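
  // A hypothetical usage sequence (illustration only, not part of the API;
  // kFooTag stands in for some concrete ExternalPointerTag):
  //
  //   uint32_t index = table->Allocate();
  //   table->Set(index, reinterpret_cast<Address>(external_object), kFooTag);
  //   Address ptr = table->Get(index, kFooTag);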

  // Runtime function called from CSA. Internally just calls Allocate().
  static uint32_t AllocateEntry(ExternalPointerTable* table);

  // Marks the specified entry as alive.
  //
  // This method is atomic and can be called from background threads.
  inline void Mark(uint32_t index);

  // Frees unmarked entries.
  //
  // This method must be called on the mutator thread or while that thread is
  // stopped.
  //
  // Returns the number of live entries after sweeping.
  uint32_t Sweep(Isolate* isolate);

 private:
  // Required for Isolate::CheckIsolateLayout().
  friend class Isolate;

  // An external pointer table grows in blocks of this size. This is also the
  // initial size of the table.
  static const size_t kBlockSize = 64 * KB;
  static const size_t kEntriesPerBlock = kBlockSize / kSystemPointerSize;
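  // For example: the sandbox requires a 64-bit build, so kSystemPointerSize
  // is 8 and each 64 KB block provides 8192 entries.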

  static const Address kExternalPointerMarkBit = 1ULL << 63;

  // Returns true if this external pointer table has been initialized.
  bool is_initialized() { return buffer_ != kNullAddress; }

  // Extends the table and adds newly created entries to the freelist. Returns
  // the new freelist head. When calling this method, mutex_ must be locked.
  //
  // TODO(saelo) this can fail, deal with that appropriately.
  uint32_t Grow();

  // Computes the address of the specified entry.
  inline Address entry_address(uint32_t index) const {
    return buffer_ + index * sizeof(Address);
  }

  // Loads the value at the given index. This method is non-atomic, only use it
  // when no other threads can currently access the table.
  inline Address load(uint32_t index) const {
    return base::Memory<Address>(entry_address(index));
  }

  // Stores the provided value at the given index. This method is non-atomic,
  // only use it when no other threads can currently access the table.
  inline void store(uint32_t index, Address value) {
    base::Memory<Address>(entry_address(index)) = value;
  }

  // Atomically loads the value at the given index.
  inline Address load_atomic(uint32_t index) const {
    auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
    return base::Relaxed_Load(addr);
  }

  // Atomically stores the provided value at the given index.
  inline void store_atomic(uint32_t index, Address value) {
    auto addr = reinterpret_cast<base::Atomic64*>(entry_address(index));
    base::Relaxed_Store(addr, value);
  }

  static bool is_marked(Address entry) {
    return (entry & kExternalPointerMarkBit) == kExternalPointerMarkBit;
  }

  static Address set_mark_bit(Address entry) {
    return entry | kExternalPointerMarkBit;
  }

  static Address clear_mark_bit(Address entry) {
    return entry & ~kExternalPointerMarkBit;
  }

  static bool is_free(Address entry) {
    return (entry & kExternalPointerFreeEntryTag) ==
           kExternalPointerFreeEntryTag;
  }

  static Address make_freelist_entry(uint32_t current_freelist_head) {
    // The next freelist entry is stored in the lower 32 bits of the entry.
    Address entry = current_freelist_head;
    return entry | kExternalPointerFreeEntryTag;
  }

  // The buffer backing this table. This is const after initialization. Should
  // only be accessed using the load_x() and store_x() methods, which take care
  // of atomicity if necessary.
  Address buffer_ = kNullAddress;

  // The current capacity of this table, which is the number of usable entries.
  uint32_t capacity_ = 0;

  // The index of the first entry on the freelist or zero if the list is empty.
  uint32_t freelist_head_ = 0;

  // Lock protecting the slow path for entry allocation, in particular Grow().
  // As the size of this structure must be predictable (it's part of
  // IsolateData), it cannot directly contain a Mutex and so instead contains a
  // pointer to one.
  base::Mutex* mutex_ = nullptr;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_SANDBOX_IS_AVAILABLE

#endif  // V8_SANDBOX_EXTERNAL_POINTER_TABLE_H_