// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#include "codegen.h"
#include "deoptimizer.h"
#include "disasm.h"
#include "full-codegen.h"
#include "global-handles.h"
#include "macro-assembler.h"
#include "prettyprinter.h"


namespace v8 {
namespace internal {

DeoptimizerData::DeoptimizerData() {
  eager_deoptimization_entry_code_ = NULL;
  lazy_deoptimization_entry_code_ = NULL;
  current_ = NULL;
  deoptimizing_code_list_ = NULL;
}


DeoptimizerData::~DeoptimizerData() {
  if (eager_deoptimization_entry_code_ != NULL) {
    eager_deoptimization_entry_code_->Free(EXECUTABLE);
    eager_deoptimization_entry_code_ = NULL;
  }
  if (lazy_deoptimization_entry_code_ != NULL) {
    lazy_deoptimization_entry_code_->Free(EXECUTABLE);
    lazy_deoptimization_entry_code_ = NULL;
  }
}

Deoptimizer* Deoptimizer::New(JSFunction* function,
                              BailoutType type,
                              unsigned bailout_id,
                              Address from,
                              int fp_to_sp_delta,
                              Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  Deoptimizer* deoptimizer = new Deoptimizer(isolate,
                                             function,
                                             type,
                                             bailout_id,
                                             from,
                                             fp_to_sp_delta);
  ASSERT(isolate->deoptimizer_data()->current_ == NULL);
  isolate->deoptimizer_data()->current_ = deoptimizer;
  return deoptimizer;
}


Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
  ASSERT(isolate == Isolate::Current());
  Deoptimizer* result = isolate->deoptimizer_data()->current_;
  ASSERT(result != NULL);
  result->DeleteFrameDescriptions();
  isolate->deoptimizer_data()->current_ = NULL;
  return result;
}


void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
                                                int count,
                                                BailoutType type) {
  TableEntryGenerator generator(masm, type, count);
  generator.Generate();
}


class DeoptimizingVisitor : public OptimizedFunctionVisitor {
 public:
  virtual void EnterContext(Context* context) {
    if (FLAG_trace_deopt) {
      PrintF("[deoptimize context: %" V8PRIxPTR "]\n",
             reinterpret_cast<intptr_t>(context));
    }
  }

  virtual void VisitFunction(JSFunction* function) {
    Deoptimizer::DeoptimizeFunction(function);
  }

  virtual void LeaveContext(Context* context) {
    context->ClearOptimizedFunctions();
  }
};


void Deoptimizer::DeoptimizeAll() {
  AssertNoAllocation no_allocation;

  if (FLAG_trace_deopt) {
    PrintF("[deoptimize all contexts]\n");
  }

  DeoptimizingVisitor visitor;
  VisitAllOptimizedFunctions(&visitor);
}


void Deoptimizer::DeoptimizeGlobalObject(JSObject* object) {
  AssertNoAllocation no_allocation;

  DeoptimizingVisitor visitor;
  VisitAllOptimizedFunctionsForGlobalObject(object, &visitor);
}


void Deoptimizer::VisitAllOptimizedFunctionsForContext(
    Context* context, OptimizedFunctionVisitor* visitor) {
  AssertNoAllocation no_allocation;

  ASSERT(context->IsGlobalContext());

  visitor->EnterContext(context);
  // Run through the list of optimized functions and deoptimize them.
  Object* element = context->OptimizedFunctionsListHead();
  while (!element->IsUndefined()) {
    JSFunction* element_function = JSFunction::cast(element);
    // Get the next link before deoptimizing as deoptimizing will clear the
    // next link.
    element = element_function->next_function_link();
    visitor->VisitFunction(element_function);
  }
  visitor->LeaveContext(context);
}


void Deoptimizer::VisitAllOptimizedFunctionsForGlobalObject(
    JSObject* object, OptimizedFunctionVisitor* visitor) {
  AssertNoAllocation no_allocation;

  if (object->IsJSGlobalProxy()) {
    Object* proto = object->GetPrototype();
    ASSERT(proto->IsJSGlobalObject());
    VisitAllOptimizedFunctionsForContext(
        GlobalObject::cast(proto)->global_context(), visitor);
  } else if (object->IsGlobalObject()) {
    VisitAllOptimizedFunctionsForContext(
        GlobalObject::cast(object)->global_context(), visitor);
  }
}


void Deoptimizer::VisitAllOptimizedFunctions(
    OptimizedFunctionVisitor* visitor) {
  AssertNoAllocation no_allocation;

  // Run through the list of all global contexts and deoptimize.
  Object* global = Isolate::Current()->heap()->global_contexts_list();
  while (!global->IsUndefined()) {
    VisitAllOptimizedFunctionsForGlobalObject(Context::cast(global)->global(),
                                              visitor);
    global = Context::cast(global)->get(Context::NEXT_CONTEXT_LINK);
  }
}


void Deoptimizer::HandleWeakDeoptimizedCode(
    v8::Persistent<v8::Value> obj, void* data) {
  DeoptimizingCodeListNode* node =
      reinterpret_cast<DeoptimizingCodeListNode*>(data);
  RemoveDeoptimizingCode(*node->code());
#ifdef DEBUG
  node = Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    ASSERT(node != reinterpret_cast<DeoptimizingCodeListNode*>(data));
    node = node->next();
  }
#endif
}


void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
  deoptimizer->DoComputeOutputFrames();
}


Deoptimizer::Deoptimizer(Isolate* isolate,
                         JSFunction* function,
                         BailoutType type,
                         unsigned bailout_id,
                         Address from,
                         int fp_to_sp_delta)
    : isolate_(isolate),
      function_(function),
      bailout_id_(bailout_id),
      bailout_type_(type),
      from_(from),
      fp_to_sp_delta_(fp_to_sp_delta),
      output_count_(0),
      output_(NULL),
      deferred_heap_numbers_(0) {
  if (FLAG_trace_deopt && type != OSR) {
    PrintF("**** DEOPT: ");
    function->PrintName();
    PrintF(" at bailout #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  } else if (FLAG_trace_osr && type == OSR) {
    PrintF("**** OSR: ");
    function->PrintName();
    PrintF(" at ast id #%u, address 0x%" V8PRIxPTR ", frame size %d\n",
           bailout_id,
           reinterpret_cast<intptr_t>(from),
           fp_to_sp_delta - (2 * kPointerSize));
  }
  // Find the optimized code.
  if (type == EAGER) {
    ASSERT(from == NULL);
    optimized_code_ = function_->code();
  } else if (type == LAZY) {
    optimized_code_ = FindDeoptimizingCodeFromAddress(from);
    ASSERT(optimized_code_ != NULL);
  } else if (type == OSR) {
    // The function has already been optimized and we're transitioning
    // from the unoptimized shared version to the optimized one in the
    // function. The return address (from) points to unoptimized code.
    optimized_code_ = function_->code();
    ASSERT(optimized_code_->kind() == Code::OPTIMIZED_FUNCTION);
    ASSERT(!optimized_code_->contains(from));
  }
  ASSERT(HEAP->allow_allocation(false));
  unsigned size = ComputeInputFrameSize();
  input_ = new(size) FrameDescription(size, function);
}


Deoptimizer::~Deoptimizer() {
  ASSERT(input_ == NULL && output_ == NULL);
}


void Deoptimizer::DeleteFrameDescriptions() {
  delete input_;
  for (int i = 0; i < output_count_; ++i) {
    if (output_[i] != input_) delete output_[i];
  }
  delete[] output_;
  input_ = NULL;
  output_ = NULL;
  ASSERT(!HEAP->allow_allocation(true));
}


Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
  ASSERT(id >= 0);
  if (id >= kNumberOfEntries) return NULL;
  LargeObjectChunk* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    if (data->eager_deoptimization_entry_code_ == NULL) {
      data->eager_deoptimization_entry_code_ = CreateCode(type);
    }
    base = data->eager_deoptimization_entry_code_;
  } else {
    if (data->lazy_deoptimization_entry_code_ == NULL) {
      data->lazy_deoptimization_entry_code_ = CreateCode(type);
    }
    base = data->lazy_deoptimization_entry_code_;
  }
  return
      static_cast<Address>(base->GetStartAddress()) + (id * table_entry_size_);
}


int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
  LargeObjectChunk* base = NULL;
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  if (type == EAGER) {
    base = data->eager_deoptimization_entry_code_;
  } else {
    base = data->lazy_deoptimization_entry_code_;
  }
  if (base == NULL ||
      addr < base->GetStartAddress() ||
      addr >= base->GetStartAddress() +
          (kNumberOfEntries * table_entry_size_)) {
    return kNotDeoptimizationEntry;
  }
  ASSERT_EQ(0,
      static_cast<int>(addr - base->GetStartAddress()) % table_entry_size_);
  return static_cast<int>(addr - base->GetStartAddress()) / table_entry_size_;
}


int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
                               unsigned id,
                               SharedFunctionInfo* shared) {
  // TODO(kasperl): For now, we do a simple linear search for the PC
  // offset associated with the given node id. This should probably be
  // changed to a binary search.
  int length = data->DeoptPoints();
  Smi* smi_id = Smi::FromInt(id);
  for (int i = 0; i < length; i++) {
    if (data->AstId(i) == smi_id) {
      return data->PcAndState(i)->value();
    }
  }
  PrintF("[couldn't find pc offset for node=%u]\n", id);
  PrintF("[method: %s]\n", *shared->DebugName()->ToCString());
  // Print the source code if available.
  HeapStringAllocator string_allocator;
  StringStream stream(&string_allocator);
  shared->SourceCodePrint(&stream, -1);
  PrintF("[source:\n%s\n]", *stream.ToCString());

  UNREACHABLE();
  return -1;
}


int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
  int length = 0;
  DeoptimizingCodeListNode* node =
      isolate->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    length++;
    node = node->next();
  }
  return length;
}


void Deoptimizer::DoComputeOutputFrames() {
  if (bailout_type_ == OSR) {
    DoComputeOsrOutputFrame();
    return;
  }

  // Print some helpful diagnostic information.
  int64_t start = OS::Ticks();
  if (FLAG_trace_deopt) {
    PrintF("[deoptimizing%s: begin 0x%08" V8PRIxPTR " ",
           (bailout_type_ == LAZY ? " (lazy)" : ""),
           reinterpret_cast<intptr_t>(function_));
    function_->PrintName();
    PrintF(" @%d]\n", bailout_id_);
  }

  // Determine basic deoptimization information. The optimized frame is
  // described by the input data.
  DeoptimizationInputData* input_data =
      DeoptimizationInputData::cast(optimized_code_->deoptimization_data());
  unsigned node_id = input_data->AstId(bailout_id_)->value();
  ByteArray* translations = input_data->TranslationByteArray();
  unsigned translation_index =
      input_data->TranslationIndex(bailout_id_)->value();

  // Do the input frame to output frame(s) translation.
  TranslationIterator iterator(translations, translation_index);
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator.Next());
  ASSERT(Translation::BEGIN == opcode);
  USE(opcode);
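  // The translation stream starts with a BEGIN command whose operand is the
  // number of output frames; each output frame is then described by a FRAME
  // command (see Translation::BeginFrame) followed by the per-slot commands
  // handled in DoTranslateCommand.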
  // Read the number of output frames and allocate an array for their
  // descriptions.
  int count = iterator.Next();
  ASSERT(output_ == NULL);
  output_ = new FrameDescription*[count];
  for (int i = 0; i < count; ++i) {
    output_[i] = NULL;
  }
  output_count_ = count;

  // Translate each output frame.
  for (int i = 0; i < count; ++i) {
    DoComputeFrame(&iterator, i);
  }

  // Print some helpful diagnostic information.
  if (FLAG_trace_deopt) {
    double ms = static_cast<double>(OS::Ticks() - start) / 1000;
    int index = output_count_ - 1;  // Index of the topmost frame.
    JSFunction* function = output_[index]->GetFunction();
    PrintF("[deoptimizing: end 0x%08" V8PRIxPTR " ",
           reinterpret_cast<intptr_t>(function));
    function->PrintName();
    PrintF(" => node=%u, pc=0x%08" V8PRIxPTR ", state=%s, took %0.3f ms]\n",
           node_id,
           output_[index]->GetPc(),
           FullCodeGenerator::State2String(
               static_cast<FullCodeGenerator::State>(
                   output_[index]->GetState()->value())),
           ms);
  }
}


void Deoptimizer::MaterializeHeapNumbers() {
  for (int i = 0; i < deferred_heap_numbers_.length(); i++) {
    HeapNumberMaterializationDescriptor d = deferred_heap_numbers_[i];
    Handle<Object> num = isolate_->factory()->NewNumber(d.value());
    if (FLAG_trace_deopt) {
      PrintF("Materializing a new heap number %p [%e] in slot %p\n",
             reinterpret_cast<void*>(*num),
             d.value(),
             d.slot_address());
    }

    Memory::Object_at(d.slot_address()) = *num;
  }
}

void Deoptimizer::DoTranslateCommand(TranslationIterator* iterator,
                                     int frame_index,
                                     unsigned output_offset) {
  disasm::NameConverter converter;
  // A GC-safe temporary placeholder that we can put in the output frame.
  const intptr_t kPlaceholder = reinterpret_cast<intptr_t>(Smi::FromInt(0));

  // Ignore commands marked as duplicate and act on the first non-duplicate.
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  while (opcode == Translation::DUPLICATE) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
    iterator->Skip(Translation::NumberOfOperandsFor(opcode));
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();
      return;

    case Translation::REGISTER: {
      int input_reg = iterator->Next();
      intptr_t input_value = input_->GetRegister(input_reg);
      if (FLAG_trace_deopt) {
        PrintF(
            " 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s\n",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            input_value,
            converter.NameOfCPURegister(input_reg));
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }

    case Translation::INT32_REGISTER: {
      int input_reg = iterator->Next();
      intptr_t value = input_->GetRegister(input_reg);
      bool is_smi = Smi::IsValid(value);
      if (FLAG_trace_deopt) {
        PrintF(
            " 0x%08" V8PRIxPTR ": [top + %d] <- %" V8PRIdPTR " ; %s (%s)\n",
            output_[frame_index]->GetTop() + output_offset,
            output_offset,
            value,
            converter.NameOfCPURegister(input_reg),
            is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                       static_cast<double>(static_cast<int32_t>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }

    case Translation::DOUBLE_REGISTER: {
      int input_reg = iterator->Next();
      double value = input_->GetDoubleRegister(input_reg);
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; %s\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               DoubleRegister::AllocationIndexToString(input_reg));
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }

    case Translation::STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      intptr_t input_value = input_->GetFrameSlot(input_offset);
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- 0x%08" V8PRIxPTR " ; [esp + %d]\n",
               output_offset,
               input_value,
               input_offset);
      }
      output_[frame_index]->SetFrameSlot(output_offset, input_value);
      return;
    }

    case Translation::INT32_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      intptr_t value = input_->GetFrameSlot(input_offset);
      bool is_smi = Smi::IsValid(value);
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": ",
               output_[frame_index]->GetTop() + output_offset);
        PrintF("[top + %d] <- %" V8PRIdPTR " ; [esp + %d] (%s)\n",
               output_offset,
               value,
               input_offset,
               is_smi ? "smi" : "heap number");
      }
      if (is_smi) {
        intptr_t tagged_value =
            reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, tagged_value);
      } else {
        // We save the untagged value on the side and store a GC-safe
        // temporary placeholder in the frame.
        AddDoubleValue(output_[frame_index]->GetTop() + output_offset,
                       static_cast<double>(static_cast<int32_t>(value)));
        output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      }
      return;
    }

    case Translation::DOUBLE_STACK_SLOT: {
      int input_slot_index = iterator->Next();
      unsigned input_offset =
          input_->GetOffsetFromSlotIndex(this, input_slot_index);
      double value = input_->GetDoubleFrameSlot(input_offset);
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- %e ; [esp + %d]\n",
               output_[frame_index]->GetTop() + output_offset,
               output_offset,
               value,
               input_offset);
      }
      // We save the untagged value on the side and store a GC-safe
      // temporary placeholder in the frame.
      AddDoubleValue(output_[frame_index]->GetTop() + output_offset, value);
      output_[frame_index]->SetFrameSlot(output_offset, kPlaceholder);
      return;
    }

    case Translation::LITERAL: {
      Object* literal = ComputeLiteral(iterator->Next());
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        literal->ShortPrint();
        PrintF(" ; literal\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(literal);
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Use the arguments marker value as a sentinel and fill in the arguments
      // object after the deoptimized frame is built.
      ASSERT(frame_index == 0);  // Only supported for first frame.
      if (FLAG_trace_deopt) {
        PrintF(" 0x%08" V8PRIxPTR ": [top + %d] <- ",
               output_[frame_index]->GetTop() + output_offset,
               output_offset);
        isolate_->heap()->arguments_marker()->ShortPrint();
        PrintF(" ; arguments object\n");
      }
      intptr_t value = reinterpret_cast<intptr_t>(
          isolate_->heap()->arguments_marker());
      output_[frame_index]->SetFrameSlot(output_offset, value);
      return;
    }
  }
}

bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
                                        int* input_offset) {
  disasm::NameConverter converter;
  FrameDescription* output = output_[0];

  // The input values are all part of the unoptimized frame so they
  // are all tagged pointers.
  uintptr_t input_value = input_->GetFrameSlot(*input_offset);
  Object* input_object = reinterpret_cast<Object*>(input_value);

  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());
  bool duplicate = (opcode == Translation::DUPLICATE);
  if (duplicate) {
    opcode = static_cast<Translation::Opcode>(iterator->Next());
  }

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
    case Translation::DUPLICATE:
      UNREACHABLE();  // Malformed input.
      return false;

    case Translation::REGISTER: {
      int output_reg = iterator->Next();
      if (FLAG_trace_osr) {
        PrintF(" %s <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               input_value,
               *input_offset);
      }
      output->SetRegister(output_reg, input_value);
      break;
    }

    case Translation::INT32_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      int int32_value = input_object->IsSmi()
          ? Smi::cast(input_object)->value()
          : FastD2I(input_object->Number());
      // Abort the translation if the conversion lost information.
      if (!input_object->IsSmi() &&
          FastI2D(int32_value) != input_object->Number()) {
        if (FLAG_trace_osr) {
          PrintF("**** %g could not be converted to int32 ****\n",
                 input_object->Number());
        }
        return false;
      }
      if (FLAG_trace_osr) {
        PrintF(" %s <- %d (int32) ; [sp + %d]\n",
               converter.NameOfCPURegister(output_reg),
               int32_value,
               *input_offset);
      }
      output->SetRegister(output_reg, int32_value);
      break;
    }

    case Translation::DOUBLE_REGISTER: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_reg = iterator->Next();
      double double_value = input_object->Number();
      if (FLAG_trace_osr) {
        PrintF(" %s <- %g (double) ; [sp + %d]\n",
               DoubleRegister::AllocationIndexToString(output_reg),
               double_value,
               *input_offset);
      }
      output->SetDoubleRegister(output_reg, double_value);
      break;
    }

    case Translation::STACK_SLOT: {
      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      if (FLAG_trace_osr) {
        PrintF(" [sp + %d] <- 0x%08" V8PRIxPTR " ; [sp + %d]\n",
               output_offset,
               input_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, input_value);
      break;
    }

    case Translation::INT32_STACK_SLOT: {
      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      int int32_value = input_object->IsSmi()
          ? Smi::cast(input_object)->value()
          : DoubleToInt32(input_object->Number());
      // Abort the translation if the conversion lost information.
      if (!input_object->IsSmi() &&
          FastI2D(int32_value) != input_object->Number()) {
        if (FLAG_trace_osr) {
          PrintF("**** %g could not be converted to int32 ****\n",
                 input_object->Number());
        }
        return false;
      }
      if (FLAG_trace_osr) {
        PrintF(" [sp + %d] <- %d (int32) ; [sp + %d]\n",
               output_offset,
               int32_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset, int32_value);
      break;
    }

    case Translation::DOUBLE_STACK_SLOT: {
      static const int kLowerOffset = 0 * kPointerSize;
      static const int kUpperOffset = 1 * kPointerSize;

      // Abort OSR if we don't have a number.
      if (!input_object->IsNumber()) return false;

      int output_index = iterator->Next();
      unsigned output_offset =
          output->GetOffsetFromSlotIndex(this, output_index);
      double double_value = input_object->Number();
      uint64_t int_value = BitCast<uint64_t, double>(double_value);
      int32_t lower = static_cast<int32_t>(int_value);
      int32_t upper = static_cast<int32_t>(int_value >> kBitsPerInt);
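      // For example, the double 1.0 has bit pattern 0x3FF0000000000000, so
      // lower is 0x00000000 and upper is 0x3FF00000.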
      if (FLAG_trace_osr) {
        PrintF(" [sp + %d] <- 0x%08x (upper bits of %g) ; [sp + %d]\n",
               output_offset + kUpperOffset,
               upper,
               double_value,
               *input_offset);
        PrintF(" [sp + %d] <- 0x%08x (lower bits of %g) ; [sp + %d]\n",
               output_offset + kLowerOffset,
               lower,
               double_value,
               *input_offset);
      }
      output->SetFrameSlot(output_offset + kLowerOffset, lower);
      output->SetFrameSlot(output_offset + kUpperOffset, upper);
      break;
    }

    case Translation::LITERAL: {
      // Just ignore non-materialized literals.
      iterator->Next();
      break;
    }

    case Translation::ARGUMENTS_OBJECT: {
      // Optimized code assumes that the argument object has not been
      // materialized and so bypasses it when doing arguments access.
      // We should have bailed out before starting the frame
      // translation.
      UNREACHABLE();
      return false;
    }
  }

  if (!duplicate) *input_offset -= kPointerSize;
  return true;
}


void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
                                      Code* check_code,
                                      Code* replacement_code) {
  // Iterate over the stack check table and patch every stack check
  // call to an unconditional call to the replacement code.
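  // Each table entry occupies two 32-bit words; the second word of each
  // entry is the pc offset of the stack check call patched below.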
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  Address stack_check_cursor = unoptimized_code->instruction_start() +
      unoptimized_code->stack_check_table_offset();
  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
  stack_check_cursor += kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {
    uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
    Address pc_after = unoptimized_code->instruction_start() + pc_offset;
    PatchStackCheckCodeAt(pc_after, check_code, replacement_code);
    stack_check_cursor += 2 * kIntSize;
  }
}


void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
                                       Code* check_code,
                                       Code* replacement_code) {
  // Iterate over the stack check table and revert the patched
  // stack check calls.
  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
  Address stack_check_cursor = unoptimized_code->instruction_start() +
      unoptimized_code->stack_check_table_offset();
  uint32_t table_length = Memory::uint32_at(stack_check_cursor);
  stack_check_cursor += kIntSize;
  for (uint32_t i = 0; i < table_length; ++i) {
    uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
    Address pc_after = unoptimized_code->instruction_start() + pc_offset;
    RevertStackCheckCodeAt(pc_after, check_code, replacement_code);
    stack_check_cursor += 2 * kIntSize;
  }
}


unsigned Deoptimizer::ComputeInputFrameSize() const {
  unsigned fixed_size = ComputeFixedSize(function_);
  // The fp-to-sp delta already takes the context and the function
  // into account so we have to avoid double counting them (-2).
  unsigned result = fixed_size + fp_to_sp_delta_ - (2 * kPointerSize);
#ifdef DEBUG
  if (bailout_type_ == OSR) {
    // TODO(kasperl): It would be nice if we could verify that the
    // size matches with the stack height we can compute based on the
    // environment at the OSR entry. The code for that is built into
    // the DoComputeOsrOutputFrame function for now.
  } else {
    unsigned stack_slots = optimized_code_->stack_slots();
    unsigned outgoing_size = ComputeOutgoingArgumentSize();
    ASSERT(result == fixed_size + (stack_slots * kPointerSize) + outgoing_size);
  }
#endif
  return result;
}


unsigned Deoptimizer::ComputeFixedSize(JSFunction* function) const {
  // The fixed part of the frame consists of the return address, frame
  // pointer, function, context, and all the incoming arguments.
  static const unsigned kFixedSlotSize = 4 * kPointerSize;
  return ComputeIncomingArgumentSize(function) + kFixedSlotSize;
}


unsigned Deoptimizer::ComputeIncomingArgumentSize(JSFunction* function) const {
  // The incoming arguments are the values for the formal parameters and
  // the receiver. Every slot contains a pointer.
  unsigned arguments = function->shared()->formal_parameter_count() + 1;
  return arguments * kPointerSize;
}


unsigned Deoptimizer::ComputeOutgoingArgumentSize() const {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  unsigned height = data->ArgumentsStackHeight(bailout_id_)->value();
  return height * kPointerSize;
}


Object* Deoptimizer::ComputeLiteral(int index) const {
  DeoptimizationInputData* data = DeoptimizationInputData::cast(
      optimized_code_->deoptimization_data());
  FixedArray* literals = data->LiteralArray();
  return literals->get(index);
}


void Deoptimizer::AddDoubleValue(intptr_t slot_address,
                                 double value) {
  HeapNumberMaterializationDescriptor value_desc(
      reinterpret_cast<Address>(slot_address), value);
  deferred_heap_numbers_.Add(value_desc);
}


LargeObjectChunk* Deoptimizer::CreateCode(BailoutType type) {
  // We cannot run this if the serializer is enabled because this will
  // cause us to emit relocation information for the external
  // references. This is fine because the deoptimizer's code section
  // isn't meant to be serialized at all.
  ASSERT(!Serializer::enabled());

  MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
  masm.set_emit_debug_code(false);
  GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(desc.reloc_size == 0);

  LargeObjectChunk* chunk = LargeObjectChunk::New(desc.instr_size, EXECUTABLE);
  memcpy(chunk->GetStartAddress(), desc.buffer, desc.instr_size);
  CPU::FlushICache(chunk->GetStartAddress(), desc.instr_size);
  return chunk;
}


Code* Deoptimizer::FindDeoptimizingCodeFromAddress(Address addr) {
  DeoptimizingCodeListNode* node =
      Isolate::Current()->deoptimizer_data()->deoptimizing_code_list_;
  while (node != NULL) {
    if (node->code()->contains(addr)) return *node->code();
    node = node->next();
  }
  return NULL;
}


void Deoptimizer::RemoveDeoptimizingCode(Code* code) {
  DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
  ASSERT(data->deoptimizing_code_list_ != NULL);
  // Run through the code objects to find this one and remove it.
  DeoptimizingCodeListNode* prev = NULL;
  DeoptimizingCodeListNode* current = data->deoptimizing_code_list_;
  while (current != NULL) {
    if (*current->code() == code) {
      // Unlink from list. If prev is NULL we are looking at the first element.
      if (prev == NULL) {
        data->deoptimizing_code_list_ = current->next();
      } else {
        prev->set_next(current->next());
      }
      delete current;
      return;
    }
    // Move to next in list.
    prev = current;
    current = current->next();
  }
  // Deoptimizing code is removed through a weak callback. Each object is
  // expected to be removed once and only once.
  UNREACHABLE();
}


FrameDescription::FrameDescription(uint32_t frame_size,
                                   JSFunction* function)
    : frame_size_(frame_size),
      function_(function),
      top_(kZapUint32),
      pc_(kZapUint32),
      fp_(kZapUint32) {
  // Zap all the registers.
  for (int r = 0; r < Register::kNumRegisters; r++) {
    SetRegister(r, kZapUint32);
  }

  // Zap all the slots.
  for (unsigned o = 0; o < frame_size; o += kPointerSize) {
    SetFrameSlot(o, kZapUint32);
  }
}


unsigned FrameDescription::GetOffsetFromSlotIndex(Deoptimizer* deoptimizer,
                                                  int slot_index) {
  if (slot_index >= 0) {
    // Local or spill slots. Skip the fixed part of the frame
    // including all arguments.
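    // For example, slot index 0 maps to offset
    // (frame size - fixed size) - kPointerSize, and each following slot
    // index yields an offset one kPointerSize smaller.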
    unsigned base = static_cast<unsigned>(
        GetFrameSize() - deoptimizer->ComputeFixedSize(GetFunction()));
    return base - ((slot_index + 1) * kPointerSize);
  } else {
    // Incoming parameter.
    unsigned base = static_cast<unsigned>(GetFrameSize() -
        deoptimizer->ComputeIncomingArgumentSize(GetFunction()));
    return base - ((slot_index + 1) * kPointerSize);
  }
}


void TranslationBuffer::Add(int32_t value) {
  // Encode the sign bit in the least significant bit.
  bool is_negative = (value < 0);
  uint32_t bits = ((is_negative ? -value : value) << 1) |
      static_cast<int32_t>(is_negative);
  // Encode the individual bytes using the least significant bit of
  // each byte to indicate whether or not more bytes follow.
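  // For example, -3 becomes the bits 7 (binary 111) and is emitted as the
  // single byte 0x0E, while 200 becomes the bits 400 and is emitted as the
  // two bytes 0x21 and 0x06.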
  do {
    uint32_t next = bits >> 7;
    contents_.Add(((bits << 1) & 0xFF) | (next != 0));
    bits = next;
  } while (bits != 0);
}


int32_t TranslationIterator::Next() {
  ASSERT(HasNext());
  // Run through the bytes until we reach one with a least significant
  // bit of zero (marks the end).
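  // Decoding reverses TranslationBuffer::Add: for example, the bytes
  // 0x21 0x06 written for 200 decode back to 200, and the single byte
  // 0x0E decodes back to -3.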
  uint32_t bits = 0;
  for (int i = 0; true; i += 7) {
    uint8_t next = buffer_->get(index_++);
    bits |= (next >> 1) << i;
    if ((next & 1) == 0) break;
  }
  // The bits encode the sign in the least significant bit.
  bool is_negative = (bits & 1) == 1;
  int32_t result = bits >> 1;
  return is_negative ? -result : result;
}


Handle<ByteArray> TranslationBuffer::CreateByteArray() {
  int length = contents_.length();
  Handle<ByteArray> result =
      Isolate::Current()->factory()->NewByteArray(length, TENURED);
  memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
  return result;
}


void Translation::BeginFrame(int node_id, int literal_id, unsigned height) {
  buffer_->Add(FRAME);
  buffer_->Add(node_id);
  buffer_->Add(literal_id);
  buffer_->Add(height);
}


void Translation::StoreRegister(Register reg) {
  buffer_->Add(REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreInt32Register(Register reg) {
  buffer_->Add(INT32_REGISTER);
  buffer_->Add(reg.code());
}


void Translation::StoreDoubleRegister(DoubleRegister reg) {
  buffer_->Add(DOUBLE_REGISTER);
  buffer_->Add(DoubleRegister::ToAllocationIndex(reg));
}


void Translation::StoreStackSlot(int index) {
  buffer_->Add(STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreInt32StackSlot(int index) {
  buffer_->Add(INT32_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreDoubleStackSlot(int index) {
  buffer_->Add(DOUBLE_STACK_SLOT);
  buffer_->Add(index);
}


void Translation::StoreLiteral(int literal_id) {
  buffer_->Add(LITERAL);
  buffer_->Add(literal_id);
}


void Translation::StoreArgumentsObject() {
  buffer_->Add(ARGUMENTS_OBJECT);
}


void Translation::MarkDuplicate() {
  buffer_->Add(DUPLICATE);
}


int Translation::NumberOfOperandsFor(Opcode opcode) {
  switch (opcode) {
    case ARGUMENTS_OBJECT:
    case DUPLICATE:
      return 0;
    case BEGIN:
    case REGISTER:
    case INT32_REGISTER:
    case DOUBLE_REGISTER:
    case STACK_SLOT:
    case INT32_STACK_SLOT:
    case DOUBLE_STACK_SLOT:
    case LITERAL:
      return 1;
    case FRAME:
      return 3;
  }
  UNREACHABLE();
  return -1;
}


#ifdef OBJECT_PRINT

const char* Translation::StringFor(Opcode opcode) {
  switch (opcode) {
    case BEGIN:
      return "BEGIN";
    case FRAME:
      return "FRAME";
    case REGISTER:
      return "REGISTER";
    case INT32_REGISTER:
      return "INT32_REGISTER";
    case DOUBLE_REGISTER:
      return "DOUBLE_REGISTER";
    case STACK_SLOT:
      return "STACK_SLOT";
    case INT32_STACK_SLOT:
      return "INT32_STACK_SLOT";
    case DOUBLE_STACK_SLOT:
      return "DOUBLE_STACK_SLOT";
    case LITERAL:
      return "LITERAL";
    case ARGUMENTS_OBJECT:
      return "ARGUMENTS_OBJECT";
    case DUPLICATE:
      return "DUPLICATE";
  }
  UNREACHABLE();
  return "";
}

#endif


DeoptimizingCodeListNode::DeoptimizingCodeListNode(Code* code): next_(NULL) {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  // Globalize the code object and make it weak.
  code_ = Handle<Code>::cast(global_handles->Create(code));
  global_handles->MakeWeak(reinterpret_cast<Object**>(code_.location()),
                           this,
                           Deoptimizer::HandleWeakDeoptimizedCode);
}


DeoptimizingCodeListNode::~DeoptimizingCodeListNode() {
  GlobalHandles* global_handles = Isolate::Current()->global_handles();
  global_handles->Destroy(reinterpret_cast<Object**>(code_.location()));
}

// We can't intermix stack decoding and allocations because the
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
SlotRef SlotRef::ComputeSlotForNextArgument(TranslationIterator* iterator,
                                            DeoptimizationInputData* data,
                                            JavaScriptFrame* frame) {
  Translation::Opcode opcode =
      static_cast<Translation::Opcode>(iterator->Next());

  switch (opcode) {
    case Translation::BEGIN:
    case Translation::FRAME:
      // Peeled off before getting here.
      break;

    case Translation::ARGUMENTS_OBJECT:
      // This can only be emitted for local slots, not for argument slots.
      break;

    case Translation::REGISTER:
    case Translation::INT32_REGISTER:
    case Translation::DOUBLE_REGISTER:
    case Translation::DUPLICATE:
      // We are at a safepoint which corresponds to a call. All registers
      // are saved by the caller so there would be no live registers at
      // this point. Thus these translation commands should not be used.
      break;

    case Translation::STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::TAGGED);
    }

    case Translation::INT32_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::INT32);
    }

    case Translation::DOUBLE_STACK_SLOT: {
      int slot_index = iterator->Next();
      Address slot_addr = SlotAddress(frame, slot_index);
      return SlotRef(slot_addr, SlotRef::DOUBLE);
    }

    case Translation::LITERAL: {
      int literal_index = iterator->Next();
      return SlotRef(data->LiteralArray()->get(literal_index));
    }
  }

  UNREACHABLE();
  return SlotRef();
}


void SlotRef::ComputeSlotMappingForArguments(JavaScriptFrame* frame,
                                             int inlined_frame_index,
                                             Vector<SlotRef>* args_slots) {
  AssertNoAllocation no_gc;
  int deopt_index = AstNode::kNoNumber;
  DeoptimizationInputData* data =
      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
  TranslationIterator it(data->TranslationByteArray(),
                         data->TranslationIndex(deopt_index)->value());
  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
  ASSERT(opcode == Translation::BEGIN);
  int frame_count = it.Next();
  USE(frame_count);
  ASSERT(frame_count > inlined_frame_index);
  int frames_to_skip = inlined_frame_index;
  while (true) {
    opcode = static_cast<Translation::Opcode>(it.Next());
    // Skip over operands to advance to the next opcode.
    it.Skip(Translation::NumberOfOperandsFor(opcode));
    if (opcode == Translation::FRAME) {
      if (frames_to_skip == 0) {
        // We reached the frame corresponding to the inlined function
        // in question. Process the translation commands for the
        // arguments.
        //
        // Skip the translation command for the receiver.
        it.Skip(Translation::NumberOfOperandsFor(
            static_cast<Translation::Opcode>(it.Next())));
        // Compute slots for arguments.
        for (int i = 0; i < args_slots->length(); ++i) {
          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
        }
        return;
      }
      frames_to_skip--;
    }
  }

  UNREACHABLE();
}


} } // namespace v8::internal