// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/maglev/maglev-code-generator.h"

#include "src/codegen/code-desc.h"
#include "src/codegen/register.h"
#include "src/codegen/safepoint-table.h"
#include "src/deoptimizer/translation-array.h"
#include "src/execution/frame-constants.h"
#include "src/interpreter/bytecode-register.h"
#include "src/maglev/maglev-code-gen-state.h"
#include "src/maglev/maglev-compilation-unit.h"
#include "src/maglev/maglev-graph-labeller.h"
#include "src/maglev/maglev-graph-printer.h"
#include "src/maglev/maglev-graph-processor.h"
#include "src/maglev/maglev-graph.h"
#include "src/maglev/maglev-ir.h"
#include "src/maglev/maglev-regalloc-data.h"
#include "src/objects/code-inl.h"

namespace v8 {
namespace internal {
namespace maglev {

#define __ masm()->

namespace {

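// Builds a std::array<T, N> with all N elements initialized to |value|.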
template <typename T, size_t... Is>
std::array<T, sizeof...(Is)> repeat(T value, std::index_sequence<Is...>) {
  return {((void)Is, value)...};
}

template <size_t N, typename T>
std::array<T, N> repeat(T value) {
  return repeat<T>(value, std::make_index_sequence<N>());
}

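// RegisterMoves maps a source register's code to the register it should be
// moved into (or no_reg if there is no pending move), while
// StackToRegisterMoves maps a target register's code to the stack slot it
// should be loaded from; see EmitBlockEndGapMoves below.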
using RegisterMoves = std::array<Register, Register::kNumRegisters>;
using StackToRegisterMoves =
    std::array<compiler::InstructionOperand, Register::kNumRegisters>;

class MaglevCodeGeneratingNodeProcessor {
 public:
  explicit MaglevCodeGeneratingNodeProcessor(MaglevCodeGenState* code_gen_state)
      : code_gen_state_(code_gen_state) {}

  void PreProcessGraph(MaglevCompilationUnit*, Graph* graph) {
    if (FLAG_maglev_break_on_entry) {
      __ int3();
    }

    __ BailoutIfDeoptimized(rbx);

    __ EnterFrame(StackFrame::BASELINE);

    // Save arguments in frame.
    // TODO(leszeks): Consider eliding this frame if we don't make any calls
    // that could clobber these registers.
    __ Push(kContextRegister);
    __ Push(kJSFunctionRegister);              // Callee's JS function.
    __ Push(kJavaScriptCallArgCountRegister);  // Actual argument count.

    // Extend rsp by the size of the frame.
    code_gen_state_->SetVregSlots(graph->stack_slots());
    __ subq(rsp, Immediate(code_gen_state_->vreg_slots() * kSystemPointerSize));

    // Initialize stack slots.
    // TODO(jgruber): Update logic once the register allocator is further
    // along.
    {
      ASM_CODE_COMMENT_STRING(masm(), "Initializing stack slots");
      __ Move(rax, Immediate(0));
      __ Move(rcx, Immediate(code_gen_state_->vreg_slots()));
      __ leaq(rdi, GetStackSlot(code_gen_state_->vreg_slots() - 1));
      __ repstosq();
    }

    // We don't emit proper safepoint data yet; instead, define a single
    // safepoint at the end of the code object.
    // TODO(v8:7700): Add better safepoint handling when we support stack
    // reuse.
    SafepointTableBuilder::Safepoint safepoint =
        safepoint_table_builder()->DefineSafepoint(masm());
    code_gen_state_->DefineSafepointStackSlots(safepoint);
  }

  void PostProcessGraph(MaglevCompilationUnit*, Graph*) {}

  void PreProcessBasicBlock(MaglevCompilationUnit*, BasicBlock* block) {
    if (FLAG_code_comments) {
      std::stringstream ss;
      ss << "-- Block b" << graph_labeller()->BlockId(block);
      __ RecordComment(ss.str());
    }

    __ bind(block->label());
  }

  template <typename NodeT>
  void Process(NodeT* node, const ProcessingState& state) {
    if (FLAG_code_comments) {
      std::stringstream ss;
      ss << "-- " << graph_labeller()->NodeId(node) << ": "
         << PrintNode(graph_labeller(), node);
      __ RecordComment(ss.str());
    }

    // Emit Phi moves before visiting the control node.
    if (std::is_base_of<UnconditionalControlNode, NodeT>::value) {
      EmitBlockEndGapMoves(node->template Cast<UnconditionalControlNode>(),
                           state);
    }

    node->GenerateCode(code_gen_state_, state);

    if (std::is_base_of<ValueNode, NodeT>::value) {
      ValueNode* value_node = node->template Cast<ValueNode>();
      if (value_node->is_spilled()) {
        compiler::AllocatedOperand source =
            compiler::AllocatedOperand::cast(value_node->result().operand());
        // We shouldn't spill nodes which already output to the stack.
        if (!source.IsStackSlot()) {
          if (FLAG_code_comments) __ RecordComment("-- Spill:");
          DCHECK(!source.IsStackSlot());
          __ movq(GetStackSlot(value_node->spill_slot()), ToRegister(source));
        } else {
          // Otherwise, the result source stack slot should be equal to the
          // spill slot.
          DCHECK_EQ(source.index(), value_node->spill_slot().index());
        }
      }
    }
  }

  void EmitSingleParallelMove(Register source, Register target,
                              RegisterMoves& moves) {
    DCHECK(!moves[target.code()].is_valid());
    __ movq(target, source);
    moves[source.code()] = Register::no_reg();
  }

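  // Emits the chain of register->register moves ending at |target|. The move
  // that reads |target| is emitted first (recursively), so no register is
  // overwritten before its value has been moved on. If the chain loops back
  // to |chain_start|, the cycle is broken by parking the chain start's value
  // in kScratchRegister. As a sketch, given the parallel moves
  // {rax->rbx, rbx->rcx, rcx->rax} and a chain started at rax, the emitted
  // sequence is rax->kScratchRegister, rcx->rax, rbx->rcx,
  // kScratchRegister->rbx.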
  bool RecursivelyEmitParallelMoveChain(Register chain_start, Register source,
                                        Register target, RegisterMoves& moves) {
    if (target == chain_start) {
      // The target of this move is the start of the move chain -- this
      // means that there is a cycle, and we have to break it by moving
      // the chain start into a temporary.

      __ RecordComment("-- * Cycle");
      EmitSingleParallelMove(target, kScratchRegister, moves);
      EmitSingleParallelMove(source, target, moves);
      return true;
    }
    bool is_cycle = false;
    if (moves[target.code()].is_valid()) {
      is_cycle = RecursivelyEmitParallelMoveChain(chain_start, target,
                                                  moves[target.code()], moves);
    } else {
      __ RecordComment("-- * Chain start");
    }
    if (is_cycle && source == chain_start) {
      EmitSingleParallelMove(kScratchRegister, target, moves);
      __ RecordComment("-- * end cycle");
    } else {
      EmitSingleParallelMove(source, target, moves);
    }
    return is_cycle;
  }

  void EmitParallelMoveChain(Register source, RegisterMoves& moves) {
    Register target = moves[source.code()];
    if (!target.is_valid()) return;

    DCHECK_NE(source, target);
    RecursivelyEmitParallelMoveChain(source, source, target, moves);
  }

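  // Stack->register moves are deferred (see RecordGapMove below) until all
  // register->register parallel moves have been emitted, so that loading from
  // the stack cannot clobber a register that is still the source of a pending
  // move.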
  void EmitStackToRegisterGapMove(compiler::InstructionOperand source,
                                  Register target) {
    if (!source.IsAllocated()) return;
    __ movq(target, GetStackSlot(compiler::AllocatedOperand::cast(source)));
  }

  void RecordGapMove(compiler::AllocatedOperand source, Register target_reg,
                     RegisterMoves& register_moves,
                     StackToRegisterMoves& stack_to_register_moves) {
    if (source.IsStackSlot()) {
      // For stack->reg moves, don't emit the move yet, but instead record the
      // move in the set of stack-to-register moves, to be executed after the
      // reg->reg parallel moves.
      stack_to_register_moves[target_reg.code()] = source;
    } else {
      // For reg->reg moves, don't emit the move yet, but instead record the
      // move in the set of parallel register moves, to be resolved later.
      Register source_reg = ToRegister(source);
      if (target_reg != source_reg) {
        DCHECK(!register_moves[source_reg.code()].is_valid());
        register_moves[source_reg.code()] = target_reg;
      }
    }
  }

  void RecordGapMove(compiler::AllocatedOperand source,
                     compiler::AllocatedOperand target,
                     RegisterMoves& register_moves,
                     StackToRegisterMoves& stack_to_register_moves) {
    if (target.IsRegister()) {
      RecordGapMove(source, ToRegister(target), register_moves,
                    stack_to_register_moves);
      return;
    }

    // stack->stack and reg->stack moves should be executed before registers
    // are clobbered by reg->reg or stack->reg, so emit them immediately.
    if (source.IsRegister()) {
      Register source_reg = ToRegister(source);
      __ movq(GetStackSlot(target), source_reg);
    } else {
      __ movq(kScratchRegister, GetStackSlot(source));
      __ movq(GetStackSlot(target), kScratchRegister);
    }
  }

  void EmitBlockEndGapMoves(UnconditionalControlNode* node,
                            const ProcessingState& state) {
    BasicBlock* target = node->target();
    if (!target->has_state()) {
      __ RecordComment("-- Target has no state, must be a fallthrough");
      return;
    }

    int predecessor_id = state.block()->predecessor_id();

    // Save register moves in an array, so that we can resolve them as parallel
    // moves. Note that the mapping is:
    //
    //   register_moves[source] = target.
    RegisterMoves register_moves =
        repeat<Register::kNumRegisters>(Register::no_reg());

    // Save stack to register moves in an array, so that we can execute them
    // after the parallel moves have read the register values. Note that the
    // mapping is:
    //
    //   stack_to_register_moves[target] = source.
    StackToRegisterMoves stack_to_register_moves;

    __ RecordComment("-- Gap moves:");

    for (auto entry : target->state()->register_state()) {
      RegisterMerge* merge;
      if (LoadMergeState(entry.state, &merge)) {
        compiler::AllocatedOperand source = merge->operand(predecessor_id);
        Register target_reg = entry.reg;

        if (FLAG_code_comments) {
          std::stringstream ss;
          ss << "-- * " << source << " → " << target_reg;
          __ RecordComment(ss.str());
        }
        RecordGapMove(source, target_reg, register_moves,
                      stack_to_register_moves);
      }
    }

    if (target->has_phi()) {
      Phi::List* phis = target->phis();
      for (Phi* phi : *phis) {
        compiler::AllocatedOperand source = compiler::AllocatedOperand::cast(
            phi->input(state.block()->predecessor_id()).operand());
        compiler::AllocatedOperand target =
            compiler::AllocatedOperand::cast(phi->result().operand());
        if (FLAG_code_comments) {
          std::stringstream ss;
          ss << "-- * " << source << " → " << target << " (n"
             << graph_labeller()->NodeId(phi) << ")";
          __ RecordComment(ss.str());
        }
        RecordGapMove(source, target, register_moves, stack_to_register_moves);
      }
    }

#define EMIT_MOVE_FOR_REG(Name) EmitParallelMoveChain(Name, register_moves);
    ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
#undef EMIT_MOVE_FOR_REG

#define EMIT_MOVE_FOR_REG(Name) \
  EmitStackToRegisterGapMove(stack_to_register_moves[Name.code()], Name);
    ALLOCATABLE_GENERAL_REGISTERS(EMIT_MOVE_FOR_REG)
#undef EMIT_MOVE_FOR_REG
  }

  Isolate* isolate() const { return code_gen_state_->isolate(); }
  MacroAssembler* masm() const { return code_gen_state_->masm(); }
  MaglevGraphLabeller* graph_labeller() const {
    return code_gen_state_->graph_labeller();
  }
  SafepointTableBuilder* safepoint_table_builder() const {
    return code_gen_state_->safepoint_table_builder();
  }

 private:
  MaglevCodeGenState* code_gen_state_;
};

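// Converts a frame-pointer-relative byte offset into the stack slot index
// used in the deopt translation: an offset of kSystemPointerSize maps to
// index 0, an offset of 0 maps to index 1, and each further slot below the
// frame pointer (more negative offset) maps to the next larger index.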
constexpr int DeoptStackSlotIndexFromFPOffset(int offset) {
  return 1 - offset / kSystemPointerSize;
}

int DeoptStackSlotFromStackSlot(const compiler::AllocatedOperand& operand) {
  return DeoptStackSlotIndexFromFPOffset(
      GetFramePointerOffsetForStackSlot(operand));
}

}  // namespace

class MaglevCodeGeneratorImpl final {
 public:
  static MaybeHandle<Code> Generate(MaglevCompilationUnit* compilation_unit,
                                    Graph* graph) {
    return MaglevCodeGeneratorImpl(compilation_unit, graph).Generate();
  }

 private:
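  // Indices into the two-entry deoptimization literal array built in
  // GenerateDeoptimizationData(): the function's SharedFunctionInfo, and the
  // optimized_out sentinel used for frame values that are not available at
  // the deopt point.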
  static constexpr int kFunctionLiteralIndex = 0;
  static constexpr int kOptimizedOutConstantIndex = 1;

  MaglevCodeGeneratorImpl(MaglevCompilationUnit* compilation_unit, Graph* graph)
      : safepoint_table_builder_(compilation_unit->zone()),
        translation_array_builder_(compilation_unit->zone()),
        code_gen_state_(compilation_unit, safepoint_table_builder()),
        processor_(compilation_unit, &code_gen_state_),
        graph_(graph) {}

  MaybeHandle<Code> Generate() {
    EmitCode();
    if (code_gen_state_.found_unsupported_code_paths()) return {};
    EmitMetadata();
    return BuildCodeObject();
  }

  void EmitCode() {
    processor_.ProcessGraph(graph_);
    EmitDeferredCode();
    EmitDeopts();
  }

  void EmitDeferredCode() {
    for (DeferredCodeInfo* deferred_code : code_gen_state_.deferred_code()) {
      __ RecordComment("-- Deferred block");
      __ bind(&deferred_code->deferred_code_label);
      deferred_code->Generate(&code_gen_state_, &deferred_code->return_label);
      __ Trap();
    }
  }

  void EmitDeopts() {
    deopt_exit_start_offset_ = __ pc_offset();

    __ RecordComment("-- Non-lazy deopts");
    for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
      EmitEagerDeopt(deopt_info);

      __ bind(&deopt_info->deopt_entry_label);
      __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Eager, 0,
                               &deopt_info->deopt_entry_label,
                               DeoptimizeKind::kEager, nullptr, nullptr);
    }

    __ RecordComment("-- Lazy deopts");
    int last_updated_safepoint = 0;
    for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
      EmitLazyDeopt(deopt_info);

      __ bind(&deopt_info->deopt_entry_label);
      __ CallForDeoptimization(Builtin::kDeoptimizationEntry_Lazy, 0,
                               &deopt_info->deopt_entry_label,
                               DeoptimizeKind::kLazy, nullptr, nullptr);

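      // Record this deopt exit (trampoline pc and translation index) on the
      // safepoint of the deopting call, so the deoptimizer can be reached
      // from the call's return address. Passing last_updated_safepoint lets
      // the builder continue scanning the safepoint table from where the
      // previous lazy deopt left off.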
      last_updated_safepoint =
          safepoint_table_builder_.UpdateDeoptimizationInfo(
              deopt_info->deopting_call_return_pc,
              deopt_info->deopt_entry_label.pos(), last_updated_safepoint,
              deopt_info->deopt_index);
    }
  }

  void EmitEagerDeopt(EagerDeoptInfo* deopt_info) {
    int frame_count = 1;
    int jsframe_count = 1;
    int update_feedback_count = 0;
    deopt_info->deopt_index = translation_array_builder_.BeginTranslation(
        frame_count, jsframe_count, update_feedback_count);

    // Returns are used for updating an accumulator or register after a lazy
    // deopt, so there are none to describe for an eager deopt.
    const int return_offset = 0;
    const int return_count = 0;
    translation_array_builder_.BeginInterpretedFrame(
        deopt_info->state.bytecode_position, kFunctionLiteralIndex,
        code_gen_state_.register_count(), return_offset, return_count);

    EmitDeoptFrameValues(
        *code_gen_state_.compilation_unit(), deopt_info->state.register_frame,
        deopt_info->input_locations, interpreter::Register::invalid_value());
  }

  void EmitLazyDeopt(LazyDeoptInfo* deopt_info) {
    int frame_count = 1;
    int jsframe_count = 1;
    int update_feedback_count = 0;
    deopt_info->deopt_index = translation_array_builder_.BeginTranslation(
        frame_count, jsframe_count, update_feedback_count);

    // Return offsets are counted from the end of the translation frame, which
    // is the array [parameters..., locals..., accumulator].
    int return_offset;
    if (deopt_info->result_location ==
        interpreter::Register::virtual_accumulator()) {
      return_offset = 0;
    } else if (deopt_info->result_location.is_parameter()) {
      // This is slightly tricky to reason about because of zero indexing and
      // fence post errors. As an example, consider a frame with 2 locals and
      // 2 parameters, where we want argument index 1 -- looking at the array
      // in reverse order we have:
      //   [acc, r1, r0, a1, a0]
      //                  ^
      // and this calculation gives, correctly:
      //   2 + 2 - 1 = 3
      return_offset = code_gen_state_.register_count() +
                      code_gen_state_.parameter_count() -
                      deopt_info->result_location.ToParameterIndex();
    } else {
      return_offset = code_gen_state_.register_count() -
                      deopt_info->result_location.index();
    }
    // TODO(leszeks): Support lazy deopts with multiple return values.
    int return_count = 1;
    translation_array_builder_.BeginInterpretedFrame(
        deopt_info->state.bytecode_position, kFunctionLiteralIndex,
        code_gen_state_.register_count(), return_offset, return_count);

    EmitDeoptFrameValues(
        *code_gen_state_.compilation_unit(), deopt_info->state.register_frame,
        deopt_info->input_locations, deopt_info->result_location);
  }

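  // Describes a single frame value to the deoptimizer: whether it lives in a
  // register or a stack slot, and whether it is a tagged value or a raw
  // (untagged) int32.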
  void EmitDeoptFrameSingleValue(ValueNode* value,
                                 const InputLocation& input_location) {
    const compiler::AllocatedOperand& operand =
        compiler::AllocatedOperand::cast(input_location.operand());
    if (operand.IsRegister()) {
      if (value->properties().is_untagged_value()) {
        translation_array_builder_.StoreInt32Register(operand.GetRegister());
      } else {
        translation_array_builder_.StoreRegister(operand.GetRegister());
      }
    } else {
      if (value->properties().is_untagged_value()) {
        translation_array_builder_.StoreInt32StackSlot(
            DeoptStackSlotFromStackSlot(operand));
      } else {
        translation_array_builder_.StoreStackSlot(
            DeoptStackSlotFromStackSlot(operand));
      }
    }
  }

  void EmitDeoptFrameValues(
      const MaglevCompilationUnit& compilation_unit,
      const CompactInterpreterFrameState* checkpoint_state,
      const InputLocation* input_locations,
      interpreter::Register result_location) {
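    // The interpreted-frame translation is filled in the following order:
    // closure, parameters, context, locals, and finally the accumulator. Any
    // slot corresponding to result_location is stored as the optimized_out
    // sentinel, since the lazy deopt's result will overwrite that slot.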
    // Closure
    int closure_index = DeoptStackSlotIndexFromFPOffset(
        StandardFrameConstants::kFunctionOffset);
    translation_array_builder_.StoreStackSlot(closure_index);

    // TODO(leszeks): The input locations array happens to be in the same order
    // as parameters+locals+accumulator are accessed here. We should make this
    // clearer and guard against this invariant failing.
    const InputLocation* input_location = input_locations;

    // Parameters
    {
      int i = 0;
      checkpoint_state->ForEachParameter(
          compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
            DCHECK_EQ(reg.ToParameterIndex(), i);
            if (reg != result_location) {
              EmitDeoptFrameSingleValue(value, *input_location);
            } else {
              translation_array_builder_.StoreLiteral(
                  kOptimizedOutConstantIndex);
            }
            i++;
            input_location++;
          });
    }

    // Context
    int context_index =
        DeoptStackSlotIndexFromFPOffset(StandardFrameConstants::kContextOffset);
    translation_array_builder_.StoreStackSlot(context_index);

    // Locals
    {
      int i = 0;
      checkpoint_state->ForEachLocal(
          compilation_unit, [&](ValueNode* value, interpreter::Register reg) {
            DCHECK_LE(i, reg.index());
            if (reg == result_location) {
              input_location++;
              return;
            }
            while (i < reg.index()) {
              translation_array_builder_.StoreLiteral(
                  kOptimizedOutConstantIndex);
              i++;
            }
            DCHECK_EQ(i, reg.index());
            EmitDeoptFrameSingleValue(value, *input_location);
            i++;
            input_location++;
          });
      while (i < code_gen_state_.register_count()) {
        translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
        i++;
      }
    }

    // Accumulator
    {
      if (checkpoint_state->liveness()->AccumulatorIsLive() &&
          result_location != interpreter::Register::virtual_accumulator()) {
        ValueNode* value = checkpoint_state->accumulator(compilation_unit);
        EmitDeoptFrameSingleValue(value, *input_location);
      } else {
        translation_array_builder_.StoreLiteral(kOptimizedOutConstantIndex);
      }
    }
  }

  void EmitMetadata() {
    // Final alignment before starting on the metadata section.
    masm()->Align(Code::kMetadataAlignment);

    safepoint_table_builder()->Emit(masm(),
                                    stack_slot_count_with_fixed_frame());
  }

  MaybeHandle<Code> BuildCodeObject() {
    CodeDesc desc;
    static constexpr int kNoHandlerTableOffset = 0;
    masm()->GetCode(isolate(), &desc, safepoint_table_builder(),
                    kNoHandlerTableOffset);
    return Factory::CodeBuilder{isolate(), desc, CodeKind::MAGLEV}
        .set_stack_slots(stack_slot_count_with_fixed_frame())
        .set_deoptimization_data(GenerateDeoptimizationData())
        .TryBuild();
  }

  Handle<DeoptimizationData> GenerateDeoptimizationData() {
    int eager_deopt_count =
        static_cast<int>(code_gen_state_.eager_deopts().size());
    int lazy_deopt_count =
        static_cast<int>(code_gen_state_.lazy_deopts().size());
    int deopt_count = lazy_deopt_count + eager_deopt_count;
    if (deopt_count == 0) {
      return DeoptimizationData::Empty(isolate());
    }
    Handle<DeoptimizationData> data =
        DeoptimizationData::New(isolate(), deopt_count, AllocationType::kOld);

    Handle<TranslationArray> translation_array =
        translation_array_builder_.ToTranslationArray(isolate()->factory());

    data->SetTranslationByteArray(*translation_array);
    data->SetInlinedFunctionCount(Smi::zero());
    // TODO(leszeks): Support optimization IDs
    data->SetOptimizationId(Smi::zero());

    DCHECK_NE(deopt_exit_start_offset_, -1);
    data->SetDeoptExitStart(Smi::FromInt(deopt_exit_start_offset_));
    data->SetEagerDeoptCount(Smi::FromInt(eager_deopt_count));
    data->SetLazyDeoptCount(Smi::FromInt(lazy_deopt_count));

    data->SetSharedFunctionInfo(
        *code_gen_state_.compilation_unit()->shared_function_info().object());

    // TODO(leszeks): Proper literals array.
    Handle<DeoptimizationLiteralArray> literals =
        isolate()->factory()->NewDeoptimizationLiteralArray(2);
    literals->set(
        kFunctionLiteralIndex,
        *code_gen_state_.compilation_unit()->shared_function_info().object());
    literals->set(kOptimizedOutConstantIndex,
                  ReadOnlyRoots(isolate()).optimized_out());
    data->SetLiteralArray(*literals);

    // TODO(leszeks): Fix once we have inlining.
    Handle<PodArray<InliningPosition>> inlining_positions =
        PodArray<InliningPosition>::New(isolate(), 0);
    data->SetInliningPositions(*inlining_positions);

    // TODO(leszeks): Fix once we have OSR.
    BytecodeOffset osr_offset = BytecodeOffset::None();
    data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
    data->SetOsrPcOffset(Smi::FromInt(-1));

    // Populate deoptimization entries.
    int i = 0;
    for (EagerDeoptInfo* deopt_info : code_gen_state_.eager_deopts()) {
      DCHECK_NE(deopt_info->deopt_index, -1);
      data->SetBytecodeOffset(i, deopt_info->state.bytecode_position);
      data->SetTranslationIndex(i, Smi::FromInt(deopt_info->deopt_index));
      data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label.pos()));
#ifdef DEBUG
      data->SetNodeId(i, Smi::FromInt(i));
#endif  // DEBUG
      i++;
    }
    for (LazyDeoptInfo* deopt_info : code_gen_state_.lazy_deopts()) {
      DCHECK_NE(deopt_info->deopt_index, -1);
      data->SetBytecodeOffset(i, deopt_info->state.bytecode_position);
      data->SetTranslationIndex(i, Smi::FromInt(deopt_info->deopt_index));
      data->SetPc(i, Smi::FromInt(deopt_info->deopt_entry_label.pos()));
#ifdef DEBUG
      data->SetNodeId(i, Smi::FromInt(i));
#endif  // DEBUG
      i++;
    }

    return data;
  }

  int stack_slot_count() const { return code_gen_state_.vreg_slots(); }
  int stack_slot_count_with_fixed_frame() const {
    return stack_slot_count() + StandardFrameConstants::kFixedSlotCount;
  }

  Isolate* isolate() const {
    return code_gen_state_.compilation_unit()->isolate();
  }
  MacroAssembler* masm() { return code_gen_state_.masm(); }
  SafepointTableBuilder* safepoint_table_builder() {
    return &safepoint_table_builder_;
  }
  TranslationArrayBuilder* translation_array_builder() {
    return &translation_array_builder_;
  }

  SafepointTableBuilder safepoint_table_builder_;
  TranslationArrayBuilder translation_array_builder_;
  MaglevCodeGenState code_gen_state_;
  GraphProcessor<MaglevCodeGeneratingNodeProcessor> processor_;
  Graph* const graph_;

  int deopt_exit_start_offset_ = -1;
};

// static
MaybeHandle<Code> MaglevCodeGenerator::Generate(
    MaglevCompilationUnit* compilation_unit, Graph* graph) {
  return MaglevCodeGeneratorImpl::Generate(compilation_unit, graph);
}

}  // namespace maglev
}  // namespace internal
}  // namespace v8