// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/handles/handles.h"

#include "src/api/api.h"
#include "src/base/logging.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/execution/isolate.h"
#include "src/execution/thread-id.h"
#include "src/handles/maybe-handles.h"
#include "src/objects/objects-inl.h"
#include "src/roots/roots-inl.h"
#include "src/utils/address-map.h"
#include "src/utils/identity-map.h"

#ifdef V8_ENABLE_MAGLEV
#include "src/maglev/maglev-concurrent-dispatcher.h"
#endif  // V8_ENABLE_MAGLEV

#ifdef DEBUG
// For GetIsolateFromWritableObject.
#include "src/heap/heap-write-barrier-inl.h"
#endif

namespace v8 {
namespace internal {

// Handles should be trivially copyable so that they can be efficiently passed
// by value. If they are not trivially copyable, they cannot be passed in
// registers.
ASSERT_TRIVIALLY_COPYABLE(HandleBase);
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);
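// Illustrative sketch (not part of the original source): because handles are
// trivially copyable, they can be passed and returned by value, typically in
// registers, e.g.:
//
//   Handle<String> Describe(Isolate* isolate, Handle<Object> obj) {
//     // `obj` arrives by value; no copy constructor runs.
//     return Object::TypeOf(isolate, obj);
//   }
//
// Object::TypeOf is used here only as a convenient stand-in for any function
// that takes and returns handles by value.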

#ifdef DEBUG
bool HandleBase::IsDereferenceAllowed() const {
  DCHECK_NOT_NULL(location_);
  Object object(*location_);
  if (object.IsSmi()) return true;
  HeapObject heap_object = HeapObject::cast(object);
  if (IsReadOnlyHeapObject(heap_object)) return true;
  Isolate* isolate = GetIsolateFromWritableObject(heap_object);
  RootIndex root_index;
  if (isolate->roots_table().IsRootHandleLocation(location_, &root_index) &&
      RootsTable::IsImmortalImmovable(root_index)) {
    return true;
  }
  if (isolate->IsBuiltinTableHandleLocation(location_)) return true;
  if (!AllowHandleDereference::IsAllowed()) return false;

  // Allocations in the shared heap may be dereferenced by multiple threads.
  if (isolate->is_shared()) return true;

  LocalHeap* local_heap = isolate->CurrentLocalHeap();

  // Local heap can't access handles when parked.
  if (!local_heap->IsHandleDereferenceAllowed()) {
    StdoutStream{} << "Cannot dereference handle owned by "
                   << "non-running local heap\n";
    return false;
  }

  // We are pretty strict with handle dereferences on background threads: A
  // background local heap is only allowed to dereference its own local or
  // persistent handles.
  if (!local_heap->is_main_thread()) {
    // The current thread owns the handle and thus can dereference it.
    return local_heap->ContainsPersistentHandle(location_) ||
           local_heap->ContainsLocalHandle(location_);
  }
  // If LocalHeap::Current() is null, we're on the main thread -- if we were to
  // check main thread HandleScopes here, we should additionally check the
  // main-thread LocalHeap.
  DCHECK_EQ(ThreadId::Current(), isolate->thread_id());

  // TODO(leszeks): Check if the main thread owns this handle.
  return true;
}
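// Illustrative sketch (assumption, not from the original file): in debug
// builds the check above fires when a handle is dereferenced inside a
// DisallowHandleDereference scope, unless one of the early-return cases
// applies (Smi, read-only object, immortal root, or builtin table entry):
//
//   Handle<JSObject> obj = previously_created_handle;  // hypothetical
//   {
//     DisallowHandleDereference no_deref;
//     // Dereferencing here, e.g. obj->map(), reaches IsDereferenceAllowed()
//     // and fails because AllowHandleDereference::IsAllowed() is false.
//   }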
#endif

int HandleScope::NumberOfHandles(Isolate* isolate) {
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  int n = static_cast<int>(impl->blocks()->size());
  if (n == 0) return 0;
  return ((n - 1) * kHandleBlockSize) +
         static_cast<int>(
             (isolate->handle_scope_data()->next - impl->blocks()->back()));
}
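// Worked example (illustrative, assuming kHandleBlockSize == 1022, its value
// in handles.h at the time of writing): with three blocks and 10 handles in
// use in the newest block, NumberOfHandles returns (3 - 1) * 1022 + 10 ==
// 2054. The difference `next - blocks()->back()` is the number of used slots
// in the last block, since `next` points at the first free slot of that block.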

Address* HandleScope::Extend(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();

  Address* result = current->next;

  DCHECK(result == current->limit);
  // Make sure there's at least one scope on the stack and that the
  // top of the scope stack isn't a barrier.
  if (!Utils::ApiCheck(current->level != current->sealed_level,
                       "v8::HandleScope::CreateHandle()",
                       "Cannot create a handle without a HandleScope")) {
    return nullptr;
  }
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  // If there's more room in the last block, we use that. This is used
  // for fast creation of scopes after scope barriers.
  if (!impl->blocks()->empty()) {
    Address* limit = &impl->blocks()->back()[kHandleBlockSize];
    if (current->limit != limit) {
      current->limit = limit;
      DCHECK_LT(limit - current->next, kHandleBlockSize);
    }
  }

  // If we still haven't found a slot for the handle, we extend the
  // current handle scope by allocating a new handle block.
  if (result == current->limit) {
    // If there's a spare block, use it for growing the current scope.
    result = impl->GetSpareOrNewBlock();
    // Add the extension to the global list of blocks, but count the
    // extension as part of the current scope.
    impl->blocks()->push_back(result);
    current->limit = &result[kHandleBlockSize];
  }

  return result;
}
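// Illustrative sketch (assumption): Extend() is not called directly; it is
// reached from CreateHandle() once a scope outgrows its current block, e.g.
// when allocating many handles in a loop:
//
//   HandleScope scope(isolate);
//   for (int i = 0; i < 5000; i++) {
//     // After the current block's limit is hit, the next CreateHandle()
//     // calls Extend() to append a fresh block to this scope.
//     Handle<Smi> h = handle(Smi::FromInt(i), isolate);
//     USE(h);
//   }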

void HandleScope::DeleteExtensions(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();
  isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}

#ifdef ENABLE_HANDLE_ZAPPING
void HandleScope::ZapRange(Address* start, Address* end) {
  DCHECK_LE(end - start, kHandleBlockSize);
  for (Address* p = start; p != end; p++) {
    *p = static_cast<Address>(kHandleZapValue);
  }
}
#endif

Address HandleScope::current_level_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}

Address HandleScope::current_next_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}

Address HandleScope::current_limit_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}

CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate, Zone* zone)
    : zone_(zone == nullptr ? new Zone(isolate->allocator(), ZONE_NAME) : zone),
      isolate_(isolate) {
  HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
  prev_canonical_scope_ = handle_scope_data->canonical_scope;
  handle_scope_data->canonical_scope = this;
  root_index_map_ = new RootIndexMap(isolate);
  identity_map_ = std::make_unique<CanonicalHandlesMap>(
      isolate->heap(), ZoneAllocationPolicy(zone_));
  canonical_level_ = handle_scope_data->level;
}

CanonicalHandleScope::~CanonicalHandleScope() {
  delete root_index_map_;
  // Note: both the identity_map_ (zone-allocated) and the zone_ itself may
  // have custom ownership semantics, controlled by subclasses. For example, in
  // case of external ownership, the subclass destructor may 'steal' both by
  // resetting the identity map pointer and nulling the zone.
  identity_map_.reset();
  delete zone_;
  isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}

Address* CanonicalHandleScope::Lookup(Address object) {
  DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
  if (isolate_->handle_scope_data()->level != canonical_level_) {
    // We are in an inner handle scope. Do not canonicalize since we will leave
    // this handle scope while still being in the canonical scope.
    return HandleScope::CreateHandle(isolate_, object);
  }
  if (Internals::HasHeapObjectTag(object)) {
    RootIndex root_index;
    if (root_index_map_->Lookup(object, &root_index)) {
      return isolate_->root_handle(root_index).location();
    }
  }
  auto find_result = identity_map_->FindOrInsert(Object(object));
  if (!find_result.already_exists) {
    // Allocate new handle location.
    *find_result.entry = HandleScope::CreateHandle(isolate_, object);
  }
  return *find_result.entry;
}
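// Illustrative sketch (assumption): while a CanonicalHandleScope is active and
// no inner HandleScope has been opened since, creating two handles for the
// same object resolves to the same backing location via Lookup() above:
//
//   HandleScope scope(isolate);
//   CanonicalHandleScope canonical(isolate, nullptr);
//   // `object` is some previously obtained Object; shown only for
//   // illustration.
//   Handle<Object> a = handle(object, isolate);
//   Handle<Object> b = handle(object, isolate);
//   DCHECK_EQ(a.location(), b.location());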

std::unique_ptr<CanonicalHandlesMap>
CanonicalHandleScope::DetachCanonicalHandles() {
  return std::move(identity_map_);
}

template <class CompilationInfoT>
CanonicalHandleScopeForOptimization<CompilationInfoT>::
    CanonicalHandleScopeForOptimization(Isolate* isolate,
                                        CompilationInfoT* info)
    : CanonicalHandleScope(isolate, info->zone()), info_(info) {}

template <class CompilationInfoT>
CanonicalHandleScopeForOptimization<
    CompilationInfoT>::~CanonicalHandleScopeForOptimization() {
  // We created the identity map on the compilation info's zone(). Pass
  // ownership to the compilation info which is responsible for the disposal.
  info_->set_canonical_handles(DetachCanonicalHandles());
  zone_ = nullptr;  // We don't own the zone, null it.
}

template class CanonicalHandleScopeForOptimization<OptimizedCompilationInfo>;
#ifdef V8_ENABLE_MAGLEV
template class CanonicalHandleScopeForOptimization<
    maglev::ExportedMaglevCompilationInfo>;
#endif  // V8_ENABLE_MAGLEV

}  // namespace internal
}  // namespace v8