// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/osr-optimized-code-cache.h"

#include "src/execution/isolate-inl.h"
#include "src/objects/code.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"

namespace v8 {
namespace internal {

const int OSROptimizedCodeCache::kInitialLength;
const int OSROptimizedCodeCache::kMaxLength;

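// Installs optimized |code| for |shared| at |osr_offset| in the context's
// OSR code cache. A cleared slot is reused when one is available; otherwise
// the cache grows (up to kMaxLength), and at max capacity the first entry
// is overwritten.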
void OSROptimizedCodeCache::AddOptimizedCode(
    Handle<NativeContext> native_context, Handle<SharedFunctionInfo> shared,
    Handle<Code> code, BailoutId osr_offset) {
  DCHECK(!osr_offset.IsNone());
  DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));
  STATIC_ASSERT(kEntryLength == 3);
  Isolate* isolate = native_context->GetIsolate();
  DCHECK(!isolate->serializer_enabled());

  Handle<OSROptimizedCodeCache> osr_cache(
      native_context->GetOSROptimizedCodeCache(), isolate);

  DCHECK_EQ(osr_cache->FindEntry(shared, osr_offset), -1);
  int entry = -1;
  for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
    if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
        osr_cache->Get(index + kCachedCodeOffset)->IsCleared()) {
      entry = index;
      break;
    }
  }

  if (entry == -1 && osr_cache->length() + kEntryLength <= kMaxLength) {
    entry = GrowOSRCache(native_context, &osr_cache);
  } else if (entry == -1) {
    // We have reached max capacity and cannot grow further, so reuse an
    // existing entry.
    // TODO(mythria): We could use a better replacement policy (like LRU) for
    // existing entries. We don't expect this to be a common case, so for now
    // we replace the first entry.
    entry = 0;
  }

  osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
}

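// Drops all cached entries by resetting the context's OSR code cache to the
// empty weak fixed array.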
void OSROptimizedCodeCache::Clear(NativeContext native_context) {
  native_context.set_osr_code_cache(
      *native_context.GetIsolate()->factory()->empty_weak_fixed_array());
}

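// Compacts the cache in two steps: live entries are first moved to the
// front, then, if NeedsTrimming reports the array as mostly empty, a smaller
// copy is allocated and installed on the context.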
void OSROptimizedCodeCache::Compact(Handle<NativeContext> native_context) {
  Isolate* isolate = native_context->GetIsolate();
  Handle<OSROptimizedCodeCache> osr_cache(
      native_context->GetOSROptimizedCodeCache(), isolate);

  // Rearrange the cache so that all valid entries are contiguous at the
  // front. This enables us to shrink the cache if needed.
  int curr_valid_index = 0;
  for (int curr_index = 0; curr_index < osr_cache->length();
       curr_index += kEntryLength) {
    if (osr_cache->Get(curr_index + kSharedOffset)->IsCleared() ||
        osr_cache->Get(curr_index + kCachedCodeOffset)->IsCleared()) {
      continue;
    }
    if (curr_valid_index != curr_index) {
      osr_cache->MoveEntry(curr_index, curr_valid_index, isolate);
    }
    curr_valid_index += kEntryLength;
  }

  if (!NeedsTrimming(curr_valid_index, osr_cache->length())) return;

  Handle<OSROptimizedCodeCache> new_osr_cache =
      Handle<OSROptimizedCodeCache>::cast(isolate->factory()->NewWeakFixedArray(
          CapacityForLength(curr_valid_index), AllocationType::kOld));
  DCHECK_LT(new_osr_cache->length(), osr_cache->length());
  {
    DisallowHeapAllocation no_gc;
    new_osr_cache->CopyElements(isolate, 0, *osr_cache, 0,
                                new_osr_cache->length(),
                                new_osr_cache->GetWriteBarrierMode(no_gc));
  }
  native_context->set_osr_code_cache(*new_osr_cache);
}

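// Returns the cached code for |shared| at |osr_offset|, or an empty Code
// object if there is no usable entry. An entry whose code reference has been
// cleared by the GC is evicted on the way out.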
Code OSROptimizedCodeCache::GetOptimizedCode(Handle<SharedFunctionInfo> shared,
                                             BailoutId osr_offset,
                                             Isolate* isolate) {
  DisallowHeapAllocation no_gc;
  int index = FindEntry(shared, osr_offset);
  if (index == -1) return Code();
  Code code = GetCodeFromEntry(index);
  if (code.is_null()) {
    ClearEntry(index, isolate);
    return code;
  }
  DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
  return code;
}

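// Clears every entry whose cached code is marked for deoptimization.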
void OSROptimizedCodeCache::EvictMarkedCode(Isolate* isolate) {
  // This is called from DeoptimizeMarkedCodeForContext, which uses raw
  // pointers, hence the DisallowHeapAllocation scope here.
  DisallowHeapAllocation no_gc;
  for (int index = 0; index < length(); index += kEntryLength) {
    MaybeObject code_entry = Get(index + kCachedCodeOffset);
    HeapObject heap_object;
    if (!code_entry->GetHeapObject(&heap_object)) continue;

    DCHECK(heap_object.IsCode());
    DCHECK(Code::cast(heap_object).is_optimized_code());
    if (!Code::cast(heap_object).marked_for_deoptimization()) continue;

    ClearEntry(index, isolate);
  }
}

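// Replaces the cache with a larger copy (sized by CapacityForLength),
// initializes the new slots to cleared weak references, and returns the
// index of the first new slot.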
int OSROptimizedCodeCache::GrowOSRCache(
    Handle<NativeContext> native_context,
    Handle<OSROptimizedCodeCache>* osr_cache) {
  Isolate* isolate = native_context->GetIsolate();
  int old_length = (*osr_cache)->length();
  int grow_by = CapacityForLength(old_length) - old_length;
  DCHECK_GT(grow_by, kEntryLength);
  *osr_cache = Handle<OSROptimizedCodeCache>::cast(
      isolate->factory()->CopyWeakFixedArrayAndGrow(*osr_cache, grow_by));
  for (int i = old_length; i < (*osr_cache)->length(); i++) {
    (*osr_cache)->Set(i, HeapObjectReference::ClearedValue(isolate));
  }
  native_context->set_osr_code_cache(**osr_cache);

  return old_length;
}

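// Returns the code stored in the entry at |index|, or an empty Code object
// if the weak reference has been cleared.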
Code OSROptimizedCodeCache::GetCodeFromEntry(int index) {
  DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
  DCHECK_EQ(index % kEntryLength, 0);
  HeapObject code_entry;
  Get(index + OSRCodeCacheConstants::kCachedCodeOffset)
      ->GetHeapObject(&code_entry);
  return code_entry.is_null() ? Code() : Code::cast(code_entry);
}

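// Returns the SharedFunctionInfo stored in the entry at |index|, or an empty
// one if the weak reference has been cleared.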
SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) {
  DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
  DCHECK_EQ(index % kEntryLength, 0);
  HeapObject sfi_entry;
  Get(index + OSRCodeCacheConstants::kSharedOffset)->GetHeapObject(&sfi_entry);
  return sfi_entry.is_null() ? SharedFunctionInfo()
                             : SharedFunctionInfo::cast(sfi_entry);
}

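// Returns the OSR offset stored in the entry at |index| as a BailoutId.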
BailoutId OSROptimizedCodeCache::GetBailoutIdFromEntry(int index) {
  DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
  DCHECK_EQ(index % kEntryLength, 0);
  Smi osr_offset_entry;
  Get(index + kOsrIdOffset)->ToSmi(&osr_offset_entry);
  return BailoutId(osr_offset_entry.value());
}

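// Returns the start index of the entry matching |shared| and |osr_offset|,
// or -1 if no such entry exists.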
int OSROptimizedCodeCache::FindEntry(Handle<SharedFunctionInfo> shared,
                                     BailoutId osr_offset) {
  DisallowHeapAllocation no_gc;
  DCHECK(!osr_offset.IsNone());
  for (int index = 0; index < length(); index += kEntryLength) {
    if (GetSFIFromEntry(index) != *shared) continue;
    if (GetBailoutIdFromEntry(index) != osr_offset) continue;
    return index;
  }
  return -1;
}

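// Resets all three fields of the entry at |index| to cleared weak
// references.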
void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) {
  Set(index + OSRCodeCacheConstants::kSharedOffset,
      HeapObjectReference::ClearedValue(isolate));
  Set(index + OSRCodeCacheConstants::kCachedCodeOffset,
      HeapObjectReference::ClearedValue(isolate));
  Set(index + OSRCodeCacheConstants::kOsrIdOffset,
      HeapObjectReference::ClearedValue(isolate));
}

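// Writes |shared| and |code| as weak references and |osr_offset| as a Smi
// into the entry at |entry|.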
void OSROptimizedCodeCache::InitializeEntry(int entry,
                                            SharedFunctionInfo shared,
                                            Code code, BailoutId osr_offset) {
  Set(entry + OSRCodeCacheConstants::kSharedOffset,
      HeapObjectReference::Weak(shared));
  Set(entry + OSRCodeCacheConstants::kCachedCodeOffset,
      HeapObjectReference::Weak(code));
  Set(entry + OSRCodeCacheConstants::kOsrIdOffset,
      MaybeObject::FromSmi(Smi::FromInt(osr_offset.ToInt())));
}

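// Copies the entry at |src| to |dst|, then clears the source entry.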
void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) {
  Set(dst + OSRCodeCacheConstants::kSharedOffset,
      Get(src + OSRCodeCacheConstants::kSharedOffset));
  Set(dst + OSRCodeCacheConstants::kCachedCodeOffset,
      Get(src + OSRCodeCacheConstants::kCachedCodeOffset));
  Set(dst + OSRCodeCacheConstants::kOsrIdOffset,
      Get(src + OSRCodeCacheConstants::kOsrIdOffset));
  ClearEntry(src, isolate);
}

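// Returns the capacity to use when (re)allocating a cache that currently
// holds |curr_length| slots: kInitialLength for an empty cache, otherwise
// double the current length, capped at kMaxLength.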
int OSROptimizedCodeCache::CapacityForLength(int curr_length) {
  // TODO(mythria): This is a randomly chosen heuristic and is not based on any
  // data. We may have to tune this later.
  if (curr_length == 0) return kInitialLength;
  if (curr_length * 2 > kMaxLength) return kMaxLength;
  return curr_length * 2;
}

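// Returns true if the cache is larger than its initial size and less than a
// third full, i.e. sparse enough that compaction should shrink it.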
bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
                                          int curr_length) {
  return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
}

}  // namespace internal
}  // namespace v8