// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "full-codegen.h"
#include "safepoint-table.h"

namespace v8 {
namespace internal {

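// Maximum size of the code that may be patched in at a lazy bailout site:
// a full call to a deoptimization entry (four instructions on MIPS).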
int Deoptimizer::patch_size() {
  const int kCallInstructionSizeInWords = 4;
  return kCallInstructionSizeInWords * Assembler::kInstrSize;
}

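// Patch every lazy bailout site in the function's optimized code with a call
// to the corresponding lazy deoptimization entry and switch the function back
// to its unoptimized code.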
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
  HandleScope scope;
  AssertNoAllocation no_allocation;

  if (!function->IsOptimized()) return;

  // Get the optimized code.
  Code* code = function->code();
  Address code_start_address = code->instruction_start();

  // Invalidate the relocation information, as it will be invalidated by the
  // code patching below and is not needed any more.
  code->InvalidateRelocation();

  // For each LLazyBailout instruction insert a call to the corresponding
  // deoptimization entry.
  DeoptimizationInputData* deopt_data =
      DeoptimizationInputData::cast(code->deoptimization_data());
#ifdef DEBUG
  Address prev_call_address = NULL;
#endif
  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
    if (deopt_data->Pc(i)->value() == -1) continue;
    Address call_address = code_start_address + deopt_data->Pc(i)->value();
    Address deopt_entry = GetDeoptimizationEntry(i, LAZY);
    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
                                                      RelocInfo::NONE);
    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
    ASSERT(call_size_in_bytes <= patch_size());
    CodePatcher patcher(call_address, call_size_in_words);
    patcher.masm()->Call(deopt_entry, RelocInfo::NONE);
    ASSERT(prev_call_address == NULL ||
           call_address >= prev_call_address + patch_size());
    ASSERT(call_address + patch_size() <= code->instruction_end());

#ifdef DEBUG
    prev_call_address = call_address;
#endif
  }

  Isolate* isolate = code->GetIsolate();

  // Add the deoptimizing code to the list.
  DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
  DeoptimizerData* data = isolate->deoptimizer_data();
  node->set_next(data->deoptimizing_code_list_);
  data->deoptimizing_code_list_ = node;

  // We might be in the middle of incremental marking with compaction.
  // Tell collector to treat this code object in a special way and
  // ignore all slots that might have been recorded on it.
  isolate->heap()->mark_compact_collector()->InvalidateCode(code);

  // Set the code for the function to the non-optimized version.
  function->ReplaceCode(function->shared()->code());

  if (FLAG_trace_deopt) {
    PrintF("[forced deoptimization: ");
    function->PrintName();
    PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
    if (FLAG_print_code) {
      code->PrintLn();
    }
#endif
  }
}

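// Patch the stack-check/interrupt sequence in the unoptimized code at
// pc_after so that the guard branch is never taken and the code
// unconditionally calls replacement_code (the on-stack replacement builtin)
// instead of check_code.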
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
                                        Address pc_after,
                                        Code* check_code,
                                        Code* replacement_code) {
  const int kInstrSize = Assembler::kInstrSize;
  // This structure comes from FullCodeGenerator::EmitStackCheck.
  // The call of the stack guard check has the following form:
  // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count-based interrupts)
  // beq at, zero_reg, ok
  // lui t9, <stack guard address> upper
  // ori t9, <stack guard address> lower
  // jalr t9
  // nop
  // ----- pc_after points here

  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));

  // Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
  patcher.masm()->addiu(at, zero_reg, 1);

  // Replace the stack check address in the load-immediate (lui/ori pair)
  // with the entry address of the replacement code.
  ASSERT(reinterpret_cast<uint32_t>(
      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
      reinterpret_cast<uint32_t>(check_code->entry()));
  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
                                   replacement_code->entry());

  // We patched the code to the following form:
  // addiu at, zero_reg, 1
  // beq at, zero_reg, ok  ;; Not changed
  // lui t9, <on-stack replacement address> upper
  // ori t9, <on-stack replacement address> lower
  // jalr t9  ;; Not changed
  // nop  ;; Not changed
  // ----- pc_after points here

  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}


void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
                                         Address pc_after,
                                         Code* check_code,
                                         Code* replacement_code) {
  // Exact opposite of the function above.
  const int kInstrSize = Assembler::kInstrSize;
  ASSERT(Assembler::IsAddImmediate(
      Assembler::instr_at(pc_after - 6 * kInstrSize)));
  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));

  // Restore the sltu instruction so beq can be taken again.
  CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
  if (FLAG_count_based_interrupts) {
    patcher.masm()->slt(at, a3, zero_reg);
  } else {
    patcher.masm()->sltu(at, sp, t0);
  }

  // Replace the on-stack replacement address in the load-immediate (lui/ori
  // pair) with the entry address of the normal stack-check code.
  ASSERT(reinterpret_cast<uint32_t>(
      Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
      reinterpret_cast<uint32_t>(replacement_code->entry()));
  Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
                                   check_code->entry());

  check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
      unoptimized_code, pc_after - 4 * kInstrSize, check_code);
}

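// Find the index of the deoptimization entry whose AST id matches ast_id and
// whose translation describes exactly one frame (no inlined frames).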
static int LookupBailoutId(DeoptimizationInputData* data, unsigned ast_id) {
  ByteArray* translations = data->TranslationByteArray();
  int length = data->DeoptCount();
  for (int i = 0; i < length; i++) {
    if (static_cast<unsigned>(data->AstId(i)->value()) == ast_id) {
      TranslationIterator it(translations, data->TranslationIndex(i)->value());
      int value = it.Next();
      ASSERT(Translation::BEGIN == static_cast<Translation::Opcode>(value));
      // Read the number of frames.
      value = it.Next();
      if (value == 1) return i;
    }
  }
  UNREACHABLE();
  return -1;
}

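// Translate the unoptimized (input) frame into the single output frame laid
// out as the optimized code expects it at its on-stack replacement entry
// point.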
void Deoptimizer::DoComputeOsrOutputFrame() {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned ast_id = data->OsrAstId()->value();

  int bailout_id = LookupBailoutId(data, ast_id);
  unsigned translation_index = data->TranslationIndex(bailout_id)->value();
  ByteArray* translations = data->TranslationByteArray();

  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  USE(opcode);
  int count = iterator.Next();
  iterator.Skip(1);  // Drop JS frame count.
  ASSERT(count == 1);
  USE(count);

  opcode = static_cast<Translation::Opcode>(iterator.Next());
  USE(opcode);
  ASSERT(Translation::JS_FRAME == opcode);
  unsigned node_id = iterator.Next();
  USE(node_id);
  ASSERT(node_id == ast_id);
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator.Next()));
  USE(function);
  ASSERT(function == function_);
  unsigned height = iterator.Next();
  unsigned height_in_bytes = height * kPointerSize;
  USE(height_in_bytes);

  unsigned fixed_size = ComputeFixedSize(function_);
  unsigned input_frame_size = input_->GetFrameSize();
  ASSERT(fixed_size + height_in_bytes == input_frame_size);

  unsigned stack_slot_size = optimized_code_->stack_slots() * kPointerSize;
  unsigned outgoing_height = data->ArgumentsStackHeight(bailout_id)->value();
  unsigned outgoing_size = outgoing_height * kPointerSize;
  unsigned output_frame_size = fixed_size + stack_slot_size + outgoing_size;
  ASSERT(outgoing_size == 0);  // OSR does not happen in the middle of a call.

  if (FLAG_trace_osr) {
    PrintF("[on-stack replacement: begin 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" => node=%u, frame=%d->%d]\n",
           ast_id,
           input_frame_size,
           output_frame_size);
  }

  // There's only one output frame in the OSR case.
  output_count_ = 1;
  output_ = new FrameDescription*[1];
  output_[0] = new(output_frame_size) FrameDescription(
      output_frame_size, function_);
  output_[0]->SetFrameType(StackFrame::JAVA_SCRIPT);

  // Clear the incoming parameters in the optimized frame to avoid
  // confusing the garbage collector.
  unsigned output_offset = output_frame_size - kPointerSize;
  int parameter_count = function_->shared()->formal_parameter_count() + 1;
  for (int i = 0; i < parameter_count; ++i) {
    output_[0]->SetFrameSlot(output_offset, 0);
    output_offset -= kPointerSize;
  }

  // Translate the incoming parameters. This may overwrite some of the
  // incoming argument slots we've just cleared.
  int input_offset = input_frame_size - kPointerSize;
  bool ok = true;
  int limit = input_offset - (parameter_count * kPointerSize);
  while (ok && input_offset > limit) {
    ok = DoOsrTranslateCommand(&iterator, &input_offset);
  }

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Set them up explicitly.
  for (int i = StandardFrameConstants::kCallerPCOffset;
       ok && i >= StandardFrameConstants::kMarkerOffset;
       i -= kPointerSize) {
    uint32_t input_value = input_->GetFrameSlot(input_offset);
    if (FLAG_trace_osr) {
      const char* name = "UNKNOWN";
      switch (i) {
        case StandardFrameConstants::kCallerPCOffset:
          name = "caller's pc";
          break;
        case StandardFrameConstants::kCallerFPOffset:
          name = "fp";
          break;
        case StandardFrameConstants::kContextOffset:
          name = "context";
          break;
        case StandardFrameConstants::kMarkerOffset:
          name = "function";
          break;
      }
      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
             output_offset,
             input_value,
             input_offset,
             name);
    }

    output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
    input_offset -= kPointerSize;
    output_offset -= kPointerSize;
  }

  // Translate the rest of the frame.
  while (ok && input_offset >= 0) {
    ok = DoOsrTranslateCommand(&iterator, &input_offset);
  }

  // If translation of any command failed, continue using the input frame.
  if (!ok) {
    delete output_[0];
    output_[0] = input_;
    output_[0]->SetPc(reinterpret_cast<uint32_t>(from_));
  } else {
    // Set up the frame pointer and the context pointer.
    output_[0]->SetRegister(fp.code(), input_->GetRegister(fp.code()));
    output_[0]->SetRegister(cp.code(), input_->GetRegister(cp.code()));

    unsigned pc_offset = data->OsrPcOffset()->value();
    uint32_t pc = reinterpret_cast<uint32_t>(
        optimized_code_->entry() + pc_offset);
    output_[0]->SetPc(pc);
  }
  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
  output_[0]->SetContinuation(
      reinterpret_cast<uint32_t>(continuation->entry()));

  if (FLAG_trace_osr) {
    PrintF("[on-stack replacement translation %s: 0x%08" V8PRIxPTR " ",
           ok ? "finished" : "aborted",
           reinterpret_cast<intptr_t>(function));
    function->PrintName();
    PrintF(" => pc=0x%0x]\n", output_[0]->GetPc());
  }
}

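// Build an arguments adaptor frame from the translation; such a frame sits
// between a caller and an inlined callee that was called with a mismatched
// number of arguments.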
void Deoptimizer::DoComputeArgumentsAdaptorFrame(TranslationIterator* iterator,
                                                 int frame_index) {
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating arguments adaptor => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFrameSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);

  // An arguments adaptor frame cannot be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  uint32_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // A marker value is used in place of the context.
  output_offset -= kPointerSize;
  intptr_t context = reinterpret_cast<intptr_t>(
      Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
  output_frame->SetFrameSlot(output_offset, context);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; context (adaptor sentinel)\n",
           top_address + output_offset, output_offset, context);
  }

  // The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Number of incoming arguments.
  output_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
           top_address + output_offset, output_offset, value, height - 1);
  }

  ASSERT(0 == output_offset);

  Builtins* builtins = isolate_->builtins();
  Code* adaptor_trampoline =
      builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
  uint32_t pc = reinterpret_cast<uint32_t>(
      adaptor_trampoline->instruction_start() +
      isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
}

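// Build a construct stub frame from the translation for an inlined
// constructor call, laid out the way JSConstructStubGeneric expects it.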
void Deoptimizer::DoComputeConstructStubFrame(TranslationIterator* iterator,
                                              int frame_index) {
  Builtins* builtins = isolate_->builtins();
  Code* construct_stub = builtins->builtin(Builtins::kJSConstructStubGeneric);
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating construct stub => height=%d\n", height_in_bytes);
  }

  unsigned fixed_frame_size = 8 * kPointerSize;
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::CONSTRUCT);

  // A construct stub frame cannot be topmost or bottommost.
  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address of the frame is computed from the previous
  // frame's top and this frame's size.
  uint32_t top_address;
  top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = height;
  unsigned output_offset = output_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }

  // Read caller's PC from the previous frame.
  output_offset -= kPointerSize;
  intptr_t callers_pc = output_[frame_index - 1]->GetPc();
  output_frame->SetFrameSlot(output_offset, callers_pc);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, callers_pc);
  }

  // Read caller's FP from the previous frame, and set this frame's FP.
  output_offset -= kPointerSize;
  intptr_t value = output_[frame_index - 1]->GetFp();
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  output_frame->SetFp(fp_value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // The context is read from the previous frame.
  output_offset -= kPointerSize;
  value = output_[frame_index - 1]->GetContext();
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // A marker value is used in place of the function.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(Smi::FromInt(StackFrame::CONSTRUCT));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; function (construct sentinel)\n",
           top_address + output_offset, output_offset, value);
  }

  // The output frame reflects a JSConstructStubGeneric frame.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(construct_stub);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; code object\n",
           top_address + output_offset, output_offset, value);
  }

  // Number of incoming arguments.
  output_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(Smi::FromInt(height - 1));
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; argc (%d)\n",
           top_address + output_offset, output_offset, value, height - 1);
  }

  // Constructor function being invoked by the stub.
  output_offset -= kPointerSize;
  value = reinterpret_cast<intptr_t>(function);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; constructor function\n",
           top_address + output_offset, output_offset, value);
  }

  // The newly allocated object was passed as receiver in the artificial
  // constructor stub environment created by HEnvironment::CopyForInlining().
  output_offset -= kPointerSize;
  value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; allocated receiver\n",
           top_address + output_offset, output_offset, value);
  }

  ASSERT(0 == output_offset);

  uint32_t pc = reinterpret_cast<uint32_t>(
      construct_stub->instruction_start() +
      isolate_->heap()->construct_stub_deopt_pc_offset()->value());
  output_frame->SetPc(pc);
}


// This code is very similar to ia32/arm code, but relies on register names
// (fp, sp) and how the frame is laid out.
void Deoptimizer::DoComputeJSFrame(TranslationIterator* iterator,
                                   int frame_index) {
  // Read the ast node id, function, and frame height for this output frame.
  int node_id = iterator->Next();
  JSFunction* function = JSFunction::cast(ComputeLiteral(iterator->Next()));
  unsigned height = iterator->Next();
  unsigned height_in_bytes = height * kPointerSize;
  if (FLAG_trace_deopt) {
    PrintF("  translating ");
    function->PrintName();
    PrintF(" => node=%d, height=%d\n", node_id, height_in_bytes);
  }

  // The 'fixed' part of the frame consists of the incoming parameters and
  // the part described by JavaScriptFrameConstants.
  unsigned fixed_frame_size = ComputeFixedSize(function);
  unsigned input_frame_size = input_->GetFrameSize();
  unsigned output_frame_size = height_in_bytes + fixed_frame_size;

  // Allocate and store the output frame description.
  FrameDescription* output_frame =
      new(output_frame_size) FrameDescription(output_frame_size, function);
  output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);

  bool is_bottommost = (0 == frame_index);
  bool is_topmost = (output_count_ - 1 == frame_index);
  ASSERT(frame_index >= 0 && frame_index < output_count_);
  ASSERT(output_[frame_index] == NULL);
  output_[frame_index] = output_frame;

  // The top address for the bottommost output frame can be computed from
  // the input frame pointer and the output frame's height. For all
  // subsequent output frames, it can be computed from the previous one's
  // top address and the current frame's size.
  uint32_t top_address;
  if (is_bottommost) {
    // 2 = context and function in the frame.
    top_address =
        input_->GetRegister(fp.code()) - (2 * kPointerSize) - height_in_bytes;
  } else {
    top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
  }
  output_frame->SetTop(top_address);

  // Compute the incoming parameter translation.
  int parameter_count = function->shared()->formal_parameter_count() + 1;
  unsigned output_offset = output_frame_size;
  unsigned input_offset = input_frame_size;
  for (int i = 0; i < parameter_count; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  input_offset -= (parameter_count * kPointerSize);

  // There are no translation commands for the caller's pc and fp, the
  // context, and the function. Synthesize their values and set them up
  // explicitly.
  //
  // The caller's pc for the bottommost output frame is the same as in the
  // input frame. For all subsequent output frames, it can be read from the
  // previous one. This frame's pc can be computed from the non-optimized
  // function code and AST id of the bailout.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  intptr_t value;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetPc();
  }
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; caller's pc\n",
           top_address + output_offset, output_offset, value);
  }

  // The caller's frame pointer for the bottommost output frame is the same
  // as in the input frame. For all subsequent output frames, it can be
  // read from the previous one. Also compute and set this frame's frame
  // pointer.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = output_[frame_index - 1]->GetFp();
  }
  output_frame->SetFrameSlot(output_offset, value);
  intptr_t fp_value = top_address + output_offset;
  ASSERT(!is_bottommost || input_->GetRegister(fp.code()) == fp_value);
  output_frame->SetFp(fp_value);
  if (is_topmost) {
    output_frame->SetRegister(fp.code(), fp_value);
  }
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; caller's fp\n",
           fp_value, output_offset, value);
  }

  // For the bottommost output frame the context is taken from the input
  // frame. For all subsequent output frames it is taken from the function,
  // as long as we do not inline functions that need local contexts.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  if (is_bottommost) {
    value = input_->GetFrameSlot(input_offset);
  } else {
    value = reinterpret_cast<intptr_t>(function->context());
  }
  output_frame->SetFrameSlot(output_offset, value);
  output_frame->SetContext(value);
  if (is_topmost) output_frame->SetRegister(cp.code(), value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; context\n",
           top_address + output_offset, output_offset, value);
  }

  // The function was mentioned explicitly in the BEGIN_FRAME.
  output_offset -= kPointerSize;
  input_offset -= kPointerSize;
  value = reinterpret_cast<uint32_t>(function);
  // The function for the bottommost output frame should also agree with the
  // input frame.
  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
  output_frame->SetFrameSlot(output_offset, value);
  if (FLAG_trace_deopt) {
    PrintF("  0x%08x: [top + %d] <- 0x%08x ; function\n",
           top_address + output_offset, output_offset, value);
  }

  // Translate the rest of the frame.
  for (unsigned i = 0; i < height; ++i) {
    output_offset -= kPointerSize;
    DoTranslateCommand(iterator, frame_index, output_offset);
  }
  ASSERT(0 == output_offset);

  // Compute this frame's PC, state, and continuation.
  Code* non_optimized_code = function->shared()->code();
  FixedArray* raw_data = non_optimized_code->deoptimization_data();
  DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
  Address start = non_optimized_code->instruction_start();
  unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
  unsigned pc_offset = FullCodeGenerator::PcField::decode(pc_and_state);
  uint32_t pc_value = reinterpret_cast<uint32_t>(start + pc_offset);
  output_frame->SetPc(pc_value);

  FullCodeGenerator::State state =
      FullCodeGenerator::StateField::decode(pc_and_state);
  output_frame->SetState(Smi::FromInt(state));


  // Set the continuation for the topmost frame.
  if (is_topmost && bailout_type_ != DEBUGGER) {
    Builtins* builtins = isolate_->builtins();
    Code* continuation = (bailout_type_ == EAGER)
        ? builtins->builtin(Builtins::kNotifyDeoptimized)
        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
    output_frame->SetContinuation(
        reinterpret_cast<uint32_t>(continuation->entry()));
  }
}

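// Fill the deoptimizer's input frame description from the live frame:
// the register values (only sp and fp matter here) and the raw stack
// contents.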
void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
  // Set the register values. The values are not important as there are no
  // callee-saved registers in JavaScript frames, so all registers are
  // spilled. Registers fp and sp are set to the correct values though.

  for (int i = 0; i < Register::kNumRegisters; i++) {
    input_->SetRegister(i, i * 4);
  }
  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
    input_->SetDoubleRegister(i, 0.0);
  }

  // Fill the frame content from the actual data on the frame.
  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
  }
}


#define __ masm()->


// This code tries to be close to ia32 code so that any changes can be
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
  GeneratePrologue();

  Isolate* isolate = masm()->isolate();

  CpuFeatures::Scope scope(FPU);
  // Unlike on ARM we don't save all the registers, just the useful ones.
  // For the rest, there are gaps on the stack, so the offsets remain the same.
  const int kNumberOfRegisters = Register::kNumRegisters;

  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
  RegList saved_regs = restored_regs | sp.bit() | ra.bit();

  const int kDoubleRegsSize =
      kDoubleSize * FPURegister::kNumAllocatableRegisters;

  // Save all FPU registers before messing with them.
  __ Subu(sp, sp, Operand(kDoubleRegsSize));
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
    int offset = i * kDoubleSize;
    __ sdc1(fpu_reg, MemOperand(sp, offset));
  }

  // Push saved_regs (needed to populate FrameDescription::registers_).
  // Leave gaps for other registers.
  __ Subu(sp, sp, kNumberOfRegisters * kPointerSize);
  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
    if ((saved_regs & (1 << i)) != 0) {
      __ sw(ToRegister(i), MemOperand(sp, kPointerSize * i));
    }
  }

  const int kSavedRegistersAreaSize =
      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;

  // Get the bailout id from the stack.
  __ lw(a2, MemOperand(sp, kSavedRegistersAreaSize));

  // Get the address of the location in the code object (a3) if possible (the
  // return address for lazy deoptimization) and compute the fp-to-sp delta in
  // register t0.
  if (type() == EAGER) {
    __ mov(a3, zero_reg);
    // Correct one word for bailout id.
    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else if (type() == OSR) {
    __ mov(a3, ra);
    // Correct one word for bailout id.
    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else {
    __ mov(a3, ra);
    // Correct two words for bailout id and return address.
    __ Addu(t0, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
  }

  __ Subu(t0, fp, t0);

  // Allocate a new deoptimizer object.
  // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
  __ PrepareCallCFunction(6, t1);
  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  __ li(a1, Operand(type()));  // Bailout type.
  // a2: bailout id already loaded.
  // a3: code address or 0 already loaded.
  __ sw(t0, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
  __ li(t1, Operand(ExternalReference::isolate_address()));
  __ sw(t1, CFunctionArgumentOperand(6));  // Isolate.
  // Call Deoptimizer::New().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
  }

  // Preserve the "deoptimizer" object in register v0 and get the input
  // frame descriptor pointer in a1 (deoptimizer->input_).
  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
  __ mov(a0, v0);
  __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));

  // Copy core registers into FrameDescription::registers_[kNumRegisters].
  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
  for (int i = 0; i < kNumberOfRegisters; i++) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((saved_regs & (1 << i)) != 0) {
      __ lw(a2, MemOperand(sp, i * kPointerSize));
      __ sw(a2, MemOperand(a1, offset));
    } else if (FLAG_debug_code) {
      __ li(a2, kDebugZapValue);
      __ sw(a2, MemOperand(a1, offset));
    }
  }

  // Copy FPU registers to
  // double_registers_[DoubleRegister::kNumAllocatableRegisters].
  int double_regs_offset = FrameDescription::double_registers_offset();
  for (int i = 0; i < FPURegister::kNumAllocatableRegisters; ++i) {
    int dst_offset = i * kDoubleSize + double_regs_offset;
    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
    __ ldc1(f0, MemOperand(sp, src_offset));
    __ sdc1(f0, MemOperand(a1, dst_offset));
  }

  // Remove the bailout id, the return address (if any), and the saved
  // registers from the stack.
  if (type() == EAGER || type() == OSR) {
    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
  } else {
    __ Addu(sp, sp, Operand(kSavedRegistersAreaSize + (2 * kPointerSize)));
  }

  // Compute a pointer to the unwinding limit in register a2; that is
  // the first stack slot not part of the input frame.
  __ lw(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
  __ Addu(a2, a2, sp);

  // Unwind the stack down to - but not including - the unwinding
  // limit and copy the contents of the activation frame to the input
  // frame description.
  __ Addu(a3, a1, Operand(FrameDescription::frame_content_offset()));
  Label pop_loop;
  __ bind(&pop_loop);
  __ pop(t0);
  __ sw(t0, MemOperand(a3, 0));
  __ Branch(USE_DELAY_SLOT, &pop_loop, ne, a2, Operand(sp));
  __ addiu(a3, a3, sizeof(uint32_t));  // In delay slot.

  // Compute the output frame in the deoptimizer.
  __ push(a0);  // Preserve deoptimizer object across call.
  // a0: deoptimizer object; a1: scratch.
  __ PrepareCallCFunction(1, a1);
  // Call Deoptimizer::ComputeOutputFrames().
  {
    AllowExternalCallThatCantCauseGC scope(masm());
    __ CallCFunction(
        ExternalReference::compute_output_frames_function(isolate), 1);
  }
  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).

  // Replace the current (input) frame with the output frames.
  Label outer_push_loop, inner_push_loop;
  // Outer loop state: a0 = current "FrameDescription** output_",
  // a1 = one past the last FrameDescription**.
  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
  __ lw(a0, MemOperand(a0, Deoptimizer::output_offset()));  // a0 is output_.
  __ sll(a1, a1, kPointerSizeLog2);  // Count to offset.
  __ addu(a1, a0, a1);  // a1 = one past the last FrameDescription**.
  __ bind(&outer_push_loop);
  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
  __ lw(a2, MemOperand(a0, 0));  // output_[ix]
  __ lw(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
  __ bind(&inner_push_loop);
  __ Subu(a3, a3, Operand(sizeof(uint32_t)));
  __ Addu(t2, a2, Operand(a3));
  __ lw(t3, MemOperand(t2, FrameDescription::frame_content_offset()));
  __ push(t3);
  __ Branch(&inner_push_loop, ne, a3, Operand(zero_reg));

  __ Addu(a0, a0, Operand(kPointerSize));
  __ Branch(&outer_push_loop, lt, a0, Operand(a1));


  // Push state, pc, and continuation from the last output frame.
  if (type() != OSR) {
    __ lw(t2, MemOperand(a2, FrameDescription::state_offset()));
    __ push(t2);
  }

  __ lw(t2, MemOperand(a2, FrameDescription::pc_offset()));
  __ push(t2);
  __ lw(t2, MemOperand(a2, FrameDescription::continuation_offset()));
  __ push(t2);

  // Technically restoring 'at' should work unless zero_reg is also restored,
  // but it is safer to check for this.
  ASSERT(!(at.bit() & restored_regs));
  // Restore the registers from the last output frame.
  __ mov(at, a2);
  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
    if ((restored_regs & (1 << i)) != 0) {
      __ lw(ToRegister(i), MemOperand(at, offset));
    }
  }

  __ InitializeRootRegister();

  __ pop(at);  // Get continuation, leave pc on stack.
  __ pop(ra);
  __ Jump(at);
  __ stop("Unreachable.");
}


// Maximum size of a table entry generated below.
const int Deoptimizer::table_entry_size_ = 9 * Assembler::kInstrSize;

void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());

  // Create a sequence of deoptimization entries. Note that any
  // registers may still be live.
  Label table_start;
  __ bind(&table_start);
  for (int i = 0; i < count(); i++) {
    Label start;
    __ bind(&start);
    if (type() != EAGER) {
      // Emulate ia32 like call by pushing return address to stack.
      __ addiu(sp, sp, -2 * kPointerSize);
      __ sw(ra, MemOperand(sp, 1 * kPointerSize));
    } else {
      __ addiu(sp, sp, -1 * kPointerSize);
    }
    // Jump over the remaining deopt entries (including this one).
    // This code is always reached by calling Jump, which puts the target
    // (label start) into t9.
    const int remaining_entries = (count() - i) * table_entry_size_;
    __ Addu(t9, t9, remaining_entries);
    // 'at' was clobbered so we can only load the current entry value here.
    __ li(at, i);
    __ jr(t9);  // Expose delay slot.
    __ sw(at, MemOperand(sp, 0 * kPointerSize));  // In the delay slot.

    // Pad the rest of the code.
    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
      __ nop();
    }

    ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
  }

  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
            count() * table_entry_size_);
}

#undef __


} }  // namespace v8::internal