// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/handles.h"

#include "src/address-map.h"
#include "src/api.h"
#include "src/base/logging.h"
#include "src/identity-map.h"
#include "src/maybe-handles.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {

// Handles should be trivially copyable so that they can be efficiently passed
// by value. If they are not trivially copyable, they cannot be passed in
// registers.
ASSERT_TRIVIALLY_COPYABLE(HandleBase);
ASSERT_TRIVIALLY_COPYABLE(Handle<Object>);
ASSERT_TRIVIALLY_COPYABLE(MaybeHandle<Object>);

#ifdef DEBUG
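// Checks whether it is safe to dereference this handle's location right now.
// Smis, objects that are not on a writable heap, and constant strong roots
// are always safe to dereference; anything else is gated on the
// AllowHandleDereference scope and, for INCLUDE_DEFERRED_CHECK, on the
// AllowDeferredHandleDereference scope and the isolate's deferred-handle
// bookkeeping.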
bool HandleBase::IsDereferenceAllowed(DereferenceCheckMode mode) const {
  DCHECK_NOT_NULL(location_);
  Object* object = *location_;
  if (object->IsSmi()) return true;
  HeapObject* heap_object = HeapObject::cast(object);
  Isolate* isolate;
  if (!Isolate::FromWritableHeapObject(heap_object, &isolate)) return true;
  Heap* heap = isolate->heap();
  Object** roots_array_start = heap->roots_array_start();
  if (roots_array_start <= location_ &&
      location_ < roots_array_start + Heap::kStrongRootListLength &&
      heap->RootCanBeTreatedAsConstant(
          static_cast<Heap::RootListIndex>(location_ - roots_array_start))) {
    return true;
  }
  if (!AllowHandleDereference::IsAllowed()) return false;
  if (mode == INCLUDE_DEFERRED_CHECK &&
      !AllowDeferredHandleDereference::IsAllowed()) {
    // Accessing cells, maps and internalized strings is safe.
    if (heap_object->IsCell()) return true;
    if (heap_object->IsMap()) return true;
    if (heap_object->IsInternalizedString()) return true;
    return !isolate->IsDeferredHandle(location_);
  }
  return true;
}
#endif


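// Counts the handles currently live in the isolate's handle scopes: all
// completely filled blocks plus the used portion of the topmost block.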
int HandleScope::NumberOfHandles(Isolate* isolate) {
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  int n = static_cast<int>(impl->blocks()->size());
  if (n == 0) return 0;
  return ((n - 1) * kHandleBlockSize) +
         static_cast<int>(
             (isolate->handle_scope_data()->next - impl->blocks()->back()));
}


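// Called when handle creation runs out of room in the current block. Either
// re-uses the remaining space of the last allocated block (for fast creation
// of scopes after scope barriers) or appends a fresh block to the current
// scope.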
Object** HandleScope::Extend(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();

  Object** result = current->next;

  DCHECK(result == current->limit);
  // Make sure there's at least one scope on the stack and that the
  // top of the scope stack isn't a barrier.
  if (!Utils::ApiCheck(current->level != current->sealed_level,
                       "v8::HandleScope::CreateHandle()",
                       "Cannot create a handle without a HandleScope")) {
    return nullptr;
  }
  HandleScopeImplementer* impl = isolate->handle_scope_implementer();
  // If there's more room in the last block, we use that. This is used
  // for fast creation of scopes after scope barriers.
  if (!impl->blocks()->empty()) {
    Object** limit = &impl->blocks()->back()[kHandleBlockSize];
    if (current->limit != limit) {
      current->limit = limit;
      DCHECK_LT(limit - current->next, kHandleBlockSize);
    }
  }

  // If we still haven't found a slot for the handle, we extend the
  // current handle scope by allocating a new handle block.
  if (result == current->limit) {
    // If there's a spare block, use it for growing the current scope.
    result = impl->GetSpareOrNewBlock();
    // Add the extension to the global list of blocks, but count the
    // extension as part of the current scope.
    impl->blocks()->push_back(result);
    current->limit = &result[kHandleBlockSize];
  }

  return result;
}


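// Frees the handle blocks that were allocated past the current scope's limit
// (see HandleScopeImplementer::DeleteExtensions).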
void HandleScope::DeleteExtensions(Isolate* isolate) {
  HandleScopeData* current = isolate->handle_scope_data();
  isolate->handle_scope_implementer()->DeleteExtensions(current->limit);
}


#ifdef ENABLE_HANDLE_ZAPPING
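// Overwrites the handle slots in [start, end) with kHandleZapValue so that
// use of a stale handle is easy to spot.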
void HandleScope::ZapRange(Object** start, Object** end) {
  DCHECK_LE(end - start, kHandleBlockSize);
  for (Object** p = start; p != end; p++) {
    *reinterpret_cast<Address*>(p) = static_cast<Address>(kHandleZapValue);
  }
}
#endif


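// Raw addresses of the isolate's HandleScopeData fields, for code that needs
// to read or update the scope state directly.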
Address HandleScope::current_level_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->level);
}


Address HandleScope::current_next_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->next);
}


Address HandleScope::current_limit_address(Isolate* isolate) {
  return reinterpret_cast<Address>(&isolate->handle_scope_data()->limit);
}

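// While a CanonicalHandleScope is active, handles created for the same object
// at the canonical scope's level share a single location, and strong root
// objects map to their root handles. A hypothetical usage sketch (|obj| and
// the surrounding HandleScope are assumed):
//
//   CanonicalHandleScope canonical(isolate);
//   Handle<Object> a = handle(obj, isolate);
//   Handle<Object> b = handle(obj, isolate);
//   DCHECK_EQ(a.location(), b.location());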
CanonicalHandleScope::CanonicalHandleScope(Isolate* isolate)
    : isolate_(isolate), zone_(isolate->allocator(), ZONE_NAME) {
  HandleScopeData* handle_scope_data = isolate_->handle_scope_data();
  prev_canonical_scope_ = handle_scope_data->canonical_scope;
  handle_scope_data->canonical_scope = this;
  root_index_map_ = new RootIndexMap(isolate);
  identity_map_ = new IdentityMap<Object**, ZoneAllocationPolicy>(
      isolate->heap(), ZoneAllocationPolicy(&zone_));
  canonical_level_ = handle_scope_data->level;
}


CanonicalHandleScope::~CanonicalHandleScope() {
  delete root_index_map_;
  delete identity_map_;
  isolate_->handle_scope_data()->canonical_scope = prev_canonical_scope_;
}


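// Returns a canonical location for |object|: the root handle for constant
// roots, otherwise a location memoized in the identity map. Handles created
// while an inner handle scope is on top are not canonicalized, because they
// die before the canonical scope does.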
Object** CanonicalHandleScope::Lookup(Object* object) {
  DCHECK_LE(canonical_level_, isolate_->handle_scope_data()->level);
  if (isolate_->handle_scope_data()->level != canonical_level_) {
    // We are in an inner handle scope. Do not canonicalize since we will leave
    // this handle scope while still being in the canonical scope.
    return HandleScope::CreateHandle(isolate_, object);
  }
  if (object->IsHeapObject()) {
    int index = root_index_map_->Lookup(HeapObject::cast(object));
    if (index != RootIndexMap::kInvalidRootIndex) {
      return isolate_->heap()
          ->root_handle(static_cast<Heap::RootListIndex>(index))
          .location();
    }
  }
  Object*** entry = identity_map_->Get(object);
  if (*entry == nullptr) {
    // Allocate new handle location.
    *entry = HandleScope::CreateHandle(isolate_, object);
  }
  return reinterpret_cast<Object**>(*entry);
}


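// A DeferredHandleScope gives the handles created inside it a lifetime beyond
// the scope itself: Detach() moves the underlying blocks into a
// DeferredHandles object owned by the caller. A hypothetical usage sketch
// (|obj| and the surrounding HandleScope are assumed):
//
//   DeferredHandleScope deferred(isolate);
//   Handle<Object> h = handle(obj, isolate);  // allocated in deferred blocks
//   DeferredHandles* keep_alive = deferred.Detach();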
DeferredHandleScope::DeferredHandleScope(Isolate* isolate)
    : impl_(isolate->handle_scope_implementer()) {
  impl_->BeginDeferredScope();
  HandleScopeData* data = impl_->isolate()->handle_scope_data();
  Object** new_next = impl_->GetSpareOrNewBlock();
  Object** new_limit = &new_next[kHandleBlockSize];
  // Check that at least one HandleScope with at least one Handle in it exists,
  // see the class description.
  DCHECK(!impl_->blocks()->empty());
  // Check that we are not in a SealedHandleScope.
  DCHECK(data->limit == &impl_->blocks()->back()[kHandleBlockSize]);
  impl_->blocks()->push_back(new_next);

#ifdef DEBUG
  prev_level_ = data->level;
#endif
  data->level++;
  prev_limit_ = data->limit;
  prev_next_ = data->next;
  data->next = new_next;
  data->limit = new_limit;
}


DeferredHandleScope::~DeferredHandleScope() {
  impl_->isolate()->handle_scope_data()->level--;
  DCHECK(handles_detached_);
  DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_);
}


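// Hands the blocks allocated since the scope was opened over to a
// DeferredHandles object and restores the previous handle scope state. Must
// be called before the DeferredHandleScope is destroyed (checked in debug
// builds).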
DeferredHandles* DeferredHandleScope::Detach() {
  DeferredHandles* deferred = impl_->Detach(prev_limit_);
  HandleScopeData* data = impl_->isolate()->handle_scope_data();
  data->next = prev_next_;
  data->limit = prev_limit_;
#ifdef DEBUG
  handles_detached_ = true;
#endif
  return deferred;
}

}  // namespace internal
}  // namespace v8