// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/address-map.h"
#include "src/assembler-inl.h"
#include "src/base/adapters.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/wasm-compiler.h"
#include "src/eh-frame.h"
#include "src/frames.h"
#include "src/lsan.h"
#include "src/macro-assembler-inl.h"
#include "src/optimized-compilation-info.h"

namespace v8 {
namespace internal {
namespace compiler {

class CodeGenerator::JumpTable final : public ZoneObject {
 public:
  JumpTable(JumpTable* next, Label** targets, size_t target_count)
      : next_(next), targets_(targets), target_count_(target_count) {}

  Label* label() { return &label_; }
  JumpTable* next() const { return next_; }
  Label** targets() const { return targets_; }
  size_t target_count() const { return target_count_; }

 private:
  Label label_;
  JumpTable* const next_;
  Label** const targets_;
  size_t const target_count_;
};

CodeGenerator::CodeGenerator(
    Zone* codegen_zone, Frame* frame, Linkage* linkage,
    InstructionSequence* code, OptimizedCompilationInfo* info,
    Isolate* isolate, base::Optional<OsrHelper> osr_helper,
    int start_source_position, JumpOptimizationInfo* jump_opt,
    PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
    int32_t builtin_index)
    : zone_(codegen_zone),
      isolate_(isolate),
      frame_access_state_(nullptr),
      linkage_(linkage),
      code_(code),
      unwinding_info_writer_(zone()),
      info_(info),
      labels_(zone()->NewArray<Label>(code->InstructionBlockCount())),
      current_block_(RpoNumber::Invalid()),
      start_source_position_(start_source_position),
      current_source_position_(SourcePosition::Unknown()),
      tasm_(isolate, options, nullptr, 0, CodeObjectRequired::kNo),
      resolver_(this),
      safepoints_(zone()),
      handlers_(zone()),
      deoptimization_exits_(zone()),
      deoptimization_states_(zone()),
      deoptimization_literals_(zone()),
      inlined_function_count_(0),
      translations_(zone()),
      handler_table_offset_(0),
      last_lazy_deopt_pc_(0),
      caller_registers_saved_(false),
      jump_tables_(nullptr),
      ools_(nullptr),
      osr_helper_(osr_helper),
      osr_pc_offset_(-1),
      optimized_out_literal_id_(-1),
      source_position_table_builder_(
          SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
      protected_instructions_(zone()),
      result_(kSuccess),
      poisoning_level_(poisoning_level),
      block_starts_(zone()),
      instr_starts_(zone()) {
  for (int i = 0; i < code->InstructionBlockCount(); ++i) {
    new (&labels_[i]) Label;
  }
  CreateFrameAccessState(frame);
  CHECK_EQ(info->is_osr(), osr_helper_.has_value());
  tasm_.set_jump_optimization_info(jump_opt);
  Code::Kind code_kind = info->code_kind();
  if (code_kind == Code::WASM_FUNCTION ||
      code_kind == Code::WASM_TO_JS_FUNCTION ||
      code_kind == Code::WASM_INTERPRETER_ENTRY ||
      (Builtins::IsBuiltinId(builtin_index) &&
       Builtins::IsWasmRuntimeStub(builtin_index))) {
    tasm_.set_abort_hard(true);
  }
  tasm_.set_builtin_index(builtin_index);
}

bool CodeGenerator::wasm_runtime_exception_support() const {
  DCHECK_NOT_NULL(info_);
  return info_->wasm_runtime_exception_support();
}

void CodeGenerator::AddProtectedInstructionLanding(uint32_t instr_offset,
                                                   uint32_t landing_offset) {
  protected_instructions_.push_back({instr_offset, landing_offset});
}

void CodeGenerator::CreateFrameAccessState(Frame* frame) {
  FinishFrame(frame);
  frame_access_state_ = new (zone()) FrameAccessState(frame);
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, SourcePosition pos) {
  DeoptimizeKind deopt_kind = GetDeoptimizationKind(deoptimization_id);
  DeoptimizeReason deoptimization_reason =
      GetDeoptimizationReason(deoptimization_id);
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      tasm()->isolate(), deoptimization_id, deopt_kind);
  if (deopt_entry == kNullAddress) return kTooManyDeoptimizationBailouts;
  if (info()->is_source_positions_enabled()) {
    tasm()->RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
  }
  tasm()->CallForDeoptimization(deopt_entry, deoptimization_id,
                                RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

void CodeGenerator::AssembleCode() {
  OptimizedCompilationInfo* info = this->info();

  // Open a frame scope to indicate that there is a frame on the stack. The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in AssemblePrologue).
  FrameScope frame_scope(tasm(), StackFrame::MANUAL);

  if (info->is_source_positions_enabled()) {
    AssembleSourcePosition(start_source_position());
  }

  // Place function entry hook if requested to do so.
  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
    ProfileEntryHookStub::MaybeCallEntryHookDelayed(tasm(), zone());
  }

  // Check that {kJavaScriptCallCodeStartRegister} has been set correctly.
  if (FLAG_debug_code && (info->code_kind() == Code::OPTIMIZED_FUNCTION ||
                          info->code_kind() == Code::BYTECODE_HANDLER)) {
    tasm()->RecordComment("-- Prologue: check code start register --");
    AssembleCodeStartRegisterCheck();
  }

  // TODO(jupvfranco): This should be the first thing in the code, otherwise
  // MaybeCallEntryHookDelayed may happen twice (for optimized and deoptimized
  // code). We want to bail out only from JS functions, which are the only
  // ones that are optimized.
  if (info->IsOptimizing()) {
    DCHECK(linkage()->GetIncomingDescriptor()->IsJSFunctionCall());
    tasm()->RecordComment("-- Prologue: check for deoptimization --");
    BailoutIfDeoptimized();
  }

  InitializeSpeculationPoison();

  // Define deoptimization literals for all inlined functions.
  DCHECK_EQ(0u, deoptimization_literals_.size());
  for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
       info->inlined_functions()) {
    if (!inlined.shared_info.equals(info->shared_info())) {
      int index = DefineDeoptimizationLiteral(
          DeoptimizationLiteral(inlined.shared_info));
      inlined.RegisterInlinedFunctionId(index);
    }
  }
  inlined_function_count_ = deoptimization_literals_.size();

  unwinding_info_writer_.SetNumberOfInstructionBlocks(
      code()->InstructionBlockCount());

  if (info->trace_turbo_json_enabled()) {
    block_starts_.assign(code()->instruction_blocks().size(), -1);
    instr_starts_.assign(code()->instructions().size(), -1);
  }
  // Assemble all non-deferred blocks, followed by deferred ones.
  for (int deferred = 0; deferred < 2; ++deferred) {
    for (const InstructionBlock* block : code()->instruction_blocks()) {
      if (block->IsDeferred() == (deferred == 0)) {
        continue;
      }

      // Align loop headers on 16-byte boundaries.
      if (block->IsLoopHeader() && !tasm()->jump_optimization_info()) {
        tasm()->Align(16);
      }
      if (info->trace_turbo_json_enabled()) {
        block_starts_[block->rpo_number().ToInt()] = tasm()->pc_offset();
      }
      // Bind a label for a block.
      current_block_ = block->rpo_number();
      unwinding_info_writer_.BeginInstructionBlock(tasm()->pc_offset(), block);
      if (FLAG_code_comments) {
        Vector<char> buffer = Vector<char>::New(200);
        char* buffer_start = buffer.start();
        LSAN_IGNORE_OBJECT(buffer_start);

        int next = SNPrintF(
            buffer, "-- B%d start%s%s%s%s", block->rpo_number().ToInt(),
            block->IsDeferred() ? " (deferred)" : "",
            block->needs_frame() ? "" : " (no frame)",
            block->must_construct_frame() ? " (construct frame)" : "",
            block->must_deconstruct_frame() ? " (deconstruct frame)" : "");

        buffer = buffer.SubVector(next, buffer.length());

        if (block->IsLoopHeader()) {
          next =
              SNPrintF(buffer, " (loop up to %d)", block->loop_end().ToInt());
          buffer = buffer.SubVector(next, buffer.length());
        }
        if (block->loop_header().IsValid()) {
          next =
              SNPrintF(buffer, " (in loop %d)", block->loop_header().ToInt());
          buffer = buffer.SubVector(next, buffer.length());
        }
        SNPrintF(buffer, " --");
        tasm()->RecordComment(buffer_start);
      }

      frame_access_state()->MarkHasFrame(block->needs_frame());

      tasm()->bind(GetLabel(current_block_));

      TryInsertBranchPoisoning(block);

      if (block->must_construct_frame()) {
        AssembleConstructFrame();
        // We need to set up the root register after we assemble the prologue,
        // to avoid clobbering callee-saved registers in case of C linkage and
        // using the roots.
        // TODO(mtrofin): investigate how we can avoid doing this repeatedly.
        if (linkage()->GetIncomingDescriptor()->InitializeRootRegister()) {
          tasm()->InitializeRootRegister();
        }
      }

      if (FLAG_enable_embedded_constant_pool && !block->needs_frame()) {
        ConstantPoolUnavailableScope constant_pool_unavailable(tasm());
        result_ = AssembleBlock(block);
      } else {
        result_ = AssembleBlock(block);
      }
      if (result_ != kSuccess) return;
      unwinding_info_writer_.EndInstructionBlock(block);
    }
  }

  // Assemble all out-of-line code.
  if (ools_) {
    tasm()->RecordComment("-- Out of line code --");
    for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
      tasm()->bind(ool->entry());
      ool->Generate();
      if (ool->exit()->is_bound()) tasm()->jmp(ool->exit());
    }
  }

  // This nop operation is needed to ensure that the trampoline is not
  // confused with the pc of the call before deoptimization.
  // The test regress/regress-259 is an example of where we need it.
  tasm()->nop();

  // Assemble deoptimization exits.
  int last_updated = 0;
  for (DeoptimizationExit* exit : deoptimization_exits_) {
    tasm()->bind(exit->label());
    int trampoline_pc = tasm()->pc_offset();
    int deoptimization_id = exit->deoptimization_id();
    DeoptimizationState* ds = deoptimization_states_[deoptimization_id];

    if (ds->kind() == DeoptimizeKind::kLazy) {
      last_updated = safepoints()->UpdateDeoptimizationInfo(
          ds->pc_offset(), trampoline_pc, last_updated);
    }
    result_ = AssembleDeoptimizerCall(deoptimization_id, exit->pos());
    if (result_ != kSuccess) return;
  }

  FinishCode();

  // Emit the jump tables.
  if (jump_tables_) {
    tasm()->Align(kPointerSize);
    for (JumpTable* table = jump_tables_; table; table = table->next()) {
      tasm()->bind(table->label());
      AssembleJumpTable(table->targets(), table->target_count());
    }
  }

  // The PerfJitLogger logs code up until here, excluding the safepoint
  // table. Resolve the unwinding info now so it is aware of the same code
  // size as reported by perf.
  unwinding_info_writer_.Finish(tasm()->pc_offset());

  safepoints()->Emit(tasm(), frame()->GetTotalFrameSlotCount());

  // Emit the exception handler table.
  if (!handlers_.empty()) {
    handler_table_offset_ = HandlerTable::EmitReturnTableStart(
        tasm(), static_cast<int>(handlers_.size()));
    for (size_t i = 0; i < handlers_.size(); ++i) {
      HandlerTable::EmitReturnEntry(tasm(), handlers_[i].pc_offset,
                                    handlers_[i].handler->pos());
    }
  }

  result_ = kSuccess;
}

void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
  // See if our predecessor was a basic block terminated by a branch_and_poison
  // instruction. If yes, then perform the masking based on the flags.
  if (block->PredecessorCount() != 1) return;
  RpoNumber pred_rpo = (block->predecessors())[0];
  const InstructionBlock* pred = code()->InstructionBlockAt(pred_rpo);
  if (pred->code_start() == pred->code_end()) return;
  Instruction* instr = code()->InstructionAt(pred->code_end() - 1);
  FlagsMode mode = FlagsModeField::decode(instr->opcode());
  switch (mode) {
    case kFlags_branch_and_poison: {
      BranchInfo branch;
      RpoNumber target = ComputeBranchInfo(&branch, instr);
      if (!target.IsValid()) {
        // Non-trivial branch, add the masking code.
        FlagsCondition condition = branch.condition;
        if (branch.false_label == GetLabel(block->rpo_number())) {
          condition = NegateFlagsCondition(condition);
        }
        AssembleBranchPoisoning(condition, instr);
      }
      break;
    }
    case kFlags_deoptimize_and_poison: {
      UNREACHABLE();
      break;
    }
    default:
      break;
  }
}

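// Assembles a binary search over the sorted (case value, jump target) pairs
// in [begin, end): ranges smaller than kBinarySearchSwitchMinimalCases are
// lowered to a linear chain of compare-and-jumps followed by a jump to the
// default block, while larger ranges recurse on the half selected by a
// single JumpIfLessThan against the middle case value.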
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
    Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
    std::pair<int32_t, Label*>* end) {
  if (end - begin < kBinarySearchSwitchMinimalCases) {
    while (begin != end) {
      tasm()->JumpIfEqual(input, begin->first, begin->second);
      ++begin;
    }
    AssembleArchJump(def_block);
    return;
  }
  auto middle = begin + (end - begin) / 2;
  Label less_label;
  tasm()->JumpIfLessThan(input, middle->first, &less_label);
  AssembleArchBinarySearchSwitchRange(input, def_block, middle, end);
  tasm()->bind(&less_label);
  AssembleArchBinarySearchSwitchRange(input, def_block, begin, middle);
}

OwnedVector<byte> CodeGenerator::GetSourcePositionTable() {
  return source_position_table_builder_.ToSourcePositionTableVector();
}

OwnedVector<trap_handler::ProtectedInstructionData>
CodeGenerator::GetProtectedInstructions() {
  return OwnedVector<trap_handler::ProtectedInstructionData>::Of(
      protected_instructions_);
}

MaybeHandle<Code> CodeGenerator::FinalizeCode() {
  if (result_ != kSuccess) {
    tasm()->AbortedCodeGeneration();
    return MaybeHandle<Code>();
  }

  // Allocate the source position table.
  Handle<ByteArray> source_positions =
      source_position_table_builder_.ToSourcePositionTable(isolate());

  // Allocate deoptimization data.
  Handle<DeoptimizationData> deopt_data = GenerateDeoptimizationData();

  // Allocate and install the code.
  CodeDesc desc;
  tasm()->GetCode(isolate(), &desc);
  if (unwinding_info_writer_.eh_frame_writer()) {
    unwinding_info_writer_.eh_frame_writer()->GetEhFrame(&desc);
  }

  MaybeHandle<Code> maybe_code = isolate()->factory()->TryNewCode(
      desc, info()->code_kind(), Handle<Object>(), info()->builtin_index(),
      source_positions, deopt_data, kMovable, info()->stub_key(), true,
      frame()->GetTotalFrameSlotCount(), safepoints()->GetCodeOffset(),
      handler_table_offset_);

  Handle<Code> code;
  if (!maybe_code.ToHandle(&code)) {
    tasm()->AbortedCodeGeneration();
    return MaybeHandle<Code>();
  }
  isolate()->counters()->total_compiled_code_size()->Increment(
      code->raw_instruction_size());

  LOG_CODE_EVENT(isolate(),
                 CodeLinePosInfoRecordEvent(code->raw_instruction_start(),
                                            *source_positions));

  return code;
}


bool CodeGenerator::IsNextInAssemblyOrder(RpoNumber block) const {
  return code()
      ->InstructionBlockAt(current_block_)
      ->ao_number()
      .IsNext(code()->InstructionBlockAt(block)->ao_number());
}


void CodeGenerator::RecordSafepoint(ReferenceMap* references,
                                    Safepoint::Kind kind, int arguments,
                                    Safepoint::DeoptMode deopt_mode) {
  Safepoint safepoint =
      safepoints()->DefineSafepoint(tasm(), kind, arguments, deopt_mode);
  int stackSlotToSpillSlotDelta =
      frame()->GetTotalFrameSlotCount() - frame()->GetSpillSlotCount();
  for (const InstructionOperand& operand : references->reference_operands()) {
    if (operand.IsStackSlot()) {
      int index = LocationOperand::cast(operand).index();
      DCHECK_LE(0, index);
      // We might index values in the fixed part of the frame (i.e. the
      // closure pointer or the context pointer); these are not spill slots
      // and therefore don't work with the SafepointTable currently, but
      // we also don't need to worry about them, since the GC has special
      // knowledge about those fields anyway.
      if (index < stackSlotToSpillSlotDelta) continue;
      safepoint.DefinePointerSlot(index);
    } else if (operand.IsRegister() && (kind & Safepoint::kWithRegisters)) {
      Register reg = LocationOperand::cast(operand).GetRegister();
      safepoint.DefinePointerRegister(reg);
    }
  }
}

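// An object can be rematerialized from the root list, instead of being kept
// alive in a register or spill slot, if it lives in the immutable part of
// the roots and the incoming call descriptor permits use of the root
// register.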
bool CodeGenerator::IsMaterializableFromRoot(
    Handle<HeapObject> object, Heap::RootListIndex* index_return) {
  const CallDescriptor* incoming_descriptor =
      linkage()->GetIncomingDescriptor();
  if (incoming_descriptor->flags() & CallDescriptor::kCanUseRoots) {
    Heap* heap = isolate()->heap();
    return heap->IsRootHandle(object, index_return) &&
           !heap->RootCanBeWrittenAfterInitialization(*index_return);
  }
  return false;
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleBlock(
    const InstructionBlock* block) {
  for (int i = block->code_start(); i < block->code_end(); ++i) {
    if (info()->trace_turbo_json_enabled()) {
      instr_starts_[i] = tasm()->pc_offset();
    }
    Instruction* instr = code()->InstructionAt(i);
    CodeGenResult result = AssembleInstruction(instr, block);
    if (result != kSuccess) return result;
  }
  return kSuccess;
}

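// Returns true if {source} can be fed directly to a push instruction of one
// of the kinds permitted by {push_type} (immediate, register or stack slot).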
bool CodeGenerator::IsValidPush(InstructionOperand source,
                                CodeGenerator::PushTypeFlags push_type) {
  if (source.IsImmediate() &&
      ((push_type & CodeGenerator::kImmediatePush) != 0)) {
    return true;
  }
  if (source.IsRegister() &&
      ((push_type & CodeGenerator::kRegisterPush) != 0)) {
    return true;
  }
  if (source.IsStackSlot() &&
      ((push_type & CodeGenerator::kStackSlotPush) != 0)) {
    return true;
  }
  return false;
}

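// Collects the gap moves of {instr} whose destinations are stack slots that
// can instead be materialized as pushes (used when lowering tail calls). If
// any gap move reads from a stack slot that such a push would overwrite, the
// optimization is abandoned and {pushes} is left empty so the full gap
// resolver handles everything.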
void CodeGenerator::GetPushCompatibleMoves(Instruction* instr,
                                           PushTypeFlags push_type,
                                           ZoneVector<MoveOperands*>* pushes) {
  pushes->clear();
  for (int i = Instruction::FIRST_GAP_POSITION;
       i <= Instruction::LAST_GAP_POSITION; ++i) {
    Instruction::GapPosition inner_pos =
        static_cast<Instruction::GapPosition>(i);
    ParallelMove* parallel_move = instr->GetParallelMove(inner_pos);
    if (parallel_move != nullptr) {
      for (auto move : *parallel_move) {
        InstructionOperand source = move->source();
        InstructionOperand destination = move->destination();
        int first_push_compatible_index =
            V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK ? 1 : 0;
        // If there are any moves from slots that will be overridden by
        // pushes, the full gap resolver must be used, since the push
        // optimization doesn't participate in the parallel move and might
        // clobber values needed for the gap resolution.
        if (source.IsStackSlot() &&
            LocationOperand::cast(source).index() >=
                first_push_compatible_index) {
          pushes->clear();
          return;
        }
        // TODO(danno): Right now, only consider moves from the FIRST gap for
        // pushes. Theoretically, we could extract pushes for both gaps (there
        // are cases where this happens), but the logic for that would also
        // have to check to make sure that non-memory inputs to the pushes
        // from the LAST gap don't get clobbered in the FIRST gap.
        if (i == Instruction::FIRST_GAP_POSITION) {
          if (destination.IsStackSlot() &&
              LocationOperand::cast(destination).index() >=
                  first_push_compatible_index) {
            int index = LocationOperand::cast(destination).index();
            if (IsValidPush(source, push_type)) {
              if (index >= static_cast<int>(pushes->size())) {
                pushes->resize(index + 1);
              }
              (*pushes)[index] = move;
            }
          }
        }
      }
    }
  }

  // For now, only support a set of contiguous pushes at the end of the list.
  size_t push_count_upper_bound = pushes->size();
  size_t push_begin = push_count_upper_bound;
  for (auto move : base::Reversed(*pushes)) {
    if (move == nullptr) break;
    push_begin--;
  }
  size_t push_count = pushes->size() - push_begin;
  std::copy(pushes->begin() + push_begin,
            pushes->begin() + push_begin + push_count, pushes->begin());
  pushes->resize(push_count);
}

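// Classifies a move by the kinds of its source and destination operands
// (constant, register or stack slot) so that the architecture-specific
// AssembleMove implementations can dispatch on a single enum value.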
CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferMove(
    InstructionOperand* source, InstructionOperand* destination) {
  if (source->IsConstant()) {
    if (destination->IsAnyRegister()) {
      return MoveType::kConstantToRegister;
    } else {
      DCHECK(destination->IsAnyStackSlot());
      return MoveType::kConstantToStack;
    }
  }
  DCHECK(LocationOperand::cast(source)->IsCompatible(
      LocationOperand::cast(destination)));
  if (source->IsAnyRegister()) {
    if (destination->IsAnyRegister()) {
      return MoveType::kRegisterToRegister;
    } else {
      DCHECK(destination->IsAnyStackSlot());
      return MoveType::kRegisterToStack;
    }
  } else {
    DCHECK(source->IsAnyStackSlot());
    if (destination->IsAnyRegister()) {
      return MoveType::kStackToRegister;
    } else {
      DCHECK(destination->IsAnyStackSlot());
      return MoveType::kStackToStack;
    }
  }
}

CodeGenerator::MoveType::Type CodeGenerator::MoveType::InferSwap(
    InstructionOperand* source, InstructionOperand* destination) {
  DCHECK(LocationOperand::cast(source)->IsCompatible(
      LocationOperand::cast(destination)));
  if (source->IsAnyRegister()) {
    if (destination->IsAnyRegister()) {
      return MoveType::kRegisterToRegister;
    } else {
      DCHECK(destination->IsAnyStackSlot());
      return MoveType::kRegisterToStack;
    }
  } else {
    DCHECK(source->IsAnyStackSlot());
    DCHECK(destination->IsAnyStackSlot());
    return MoveType::kStackToStack;
  }
}

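// Fills in {branch} with the condition and target labels for the two-way
// branch at the end of {instr}. If both edges lead to the same block, the
// branch is redundant and that block's RpoNumber is returned; otherwise the
// condition is negated when the true target is the next block in assembly
// order (so the branch can fall through) and RpoNumber::Invalid() is
// returned.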
RpoNumber CodeGenerator::ComputeBranchInfo(BranchInfo* branch,
                                           Instruction* instr) {
  // Assemble a branch after this instruction.
  InstructionOperandConverter i(this, instr);
  RpoNumber true_rpo = i.InputRpo(instr->InputCount() - 2);
  RpoNumber false_rpo = i.InputRpo(instr->InputCount() - 1);

  if (true_rpo == false_rpo) {
    return true_rpo;
  }
  FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
  if (IsNextInAssemblyOrder(true_rpo)) {
    // true block is next, can fall through if condition negated.
    std::swap(true_rpo, false_rpo);
    condition = NegateFlagsCondition(condition);
  }
  branch->condition = condition;
  branch->true_label = GetLabel(true_rpo);
  branch->false_label = GetLabel(false_rpo);
  branch->fallthru = IsNextInAssemblyOrder(false_rpo);
  return RpoNumber::Invalid();
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
    Instruction* instr, const InstructionBlock* block) {
  int first_unused_stack_slot;
  FlagsMode mode = FlagsModeField::decode(instr->opcode());
  if (mode != kFlags_trap) {
    AssembleSourcePosition(instr);
  }
  bool adjust_stack =
      GetSlotAboveSPBeforeTailCall(instr, &first_unused_stack_slot);
  if (adjust_stack) AssembleTailCallBeforeGap(instr, first_unused_stack_slot);
  AssembleGaps(instr);
  if (adjust_stack) AssembleTailCallAfterGap(instr, first_unused_stack_slot);
  DCHECK_IMPLIES(
      block->must_deconstruct_frame(),
      instr != code()->InstructionAt(block->last_instruction_index()) ||
          instr->IsRet() || instr->IsJump());
  if (instr->IsJump() && block->must_deconstruct_frame()) {
    AssembleDeconstructFrame();
  }
  // Assemble architecture-specific code for the instruction.
  CodeGenResult result = AssembleArchInstruction(instr);
  if (result != kSuccess) return result;

  FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
  switch (mode) {
    case kFlags_branch:
    case kFlags_branch_and_poison: {
      BranchInfo branch;
      RpoNumber target = ComputeBranchInfo(&branch, instr);
      if (target.IsValid()) {
        // redundant branch.
        if (!IsNextInAssemblyOrder(target)) {
          AssembleArchJump(target);
        }
        return kSuccess;
      }
      // Assemble architecture-specific branch.
      AssembleArchBranch(instr, &branch);
      break;
    }
    case kFlags_deoptimize:
    case kFlags_deoptimize_and_poison: {
      // Assemble a conditional eager deoptimization after this instruction.
      InstructionOperandConverter i(this, instr);
      size_t frame_state_offset = MiscField::decode(instr->opcode());
      DeoptimizationExit* const exit =
          AddDeoptimizationExit(instr, frame_state_offset);
      Label continue_label;
      BranchInfo branch;
      branch.condition = condition;
      branch.true_label = exit->label();
      branch.false_label = &continue_label;
      branch.fallthru = true;
      // Assemble architecture-specific branch.
      AssembleArchDeoptBranch(instr, &branch);
      tasm()->bind(&continue_label);
      if (mode == kFlags_deoptimize_and_poison) {
        AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
      }
      break;
    }
    case kFlags_set: {
      // Assemble a boolean materialization after this instruction.
      AssembleArchBoolean(instr, condition);
      break;
    }
    case kFlags_trap: {
      AssembleArchTrap(instr, condition);
      break;
    }
    case kFlags_none: {
      break;
    }
  }

  // TODO(jarin) We should thread the flag through rather than set it.
  if (instr->IsCall()) {
    ResetSpeculationPoison();
  }

  return kSuccess;
}

void CodeGenerator::AssembleSourcePosition(Instruction* instr) {
  SourcePosition source_position = SourcePosition::Unknown();
  if (instr->IsNop() && instr->AreMovesRedundant()) return;
  if (!code()->GetSourcePosition(instr, &source_position)) return;
  AssembleSourcePosition(source_position);
}

void CodeGenerator::AssembleSourcePosition(SourcePosition source_position) {
  if (source_position == current_source_position_) return;
  current_source_position_ = source_position;
  if (!source_position.IsKnown()) return;
  source_position_table_builder_.AddPosition(tasm()->pc_offset(),
                                             source_position, false);
  if (FLAG_code_comments) {
    OptimizedCompilationInfo* info = this->info();
    if (info->IsStub()) return;
    std::ostringstream buffer;
    buffer << "-- ";
    // Turbolizer only needs the source position, as it can reconstruct
    // the inlining stack from other information.
    if (info->trace_turbo_json_enabled() || !tasm()->isolate() ||
        tasm()->isolate()->concurrent_recompilation_enabled()) {
      buffer << source_position;
    } else {
      AllowHeapAllocation allocation;
      AllowHandleAllocation handles;
      AllowHandleDereference deref;
      buffer << source_position.InliningStack(info);
    }
    buffer << " --";
    char* str = StrDup(buffer.str().c_str());
    LSAN_IGNORE_OBJECT(str);
    tasm()->RecordComment(str);
  }
}

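// For tail calls, the last instruction input encodes the first unused stack
// slot above SP before the call; returns true and stores it in {slot} so the
// stack can be adjusted around the gap moves, and false for all other
// instructions.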
bool CodeGenerator::GetSlotAboveSPBeforeTailCall(Instruction* instr,
                                                 int* slot) {
  if (instr->IsTailCall()) {
    InstructionOperandConverter g(this, instr);
    *slot = g.InputInt32(instr->InputCount() - 1);
    return true;
  } else {
    return false;
  }
}

StubCallMode CodeGenerator::DetermineStubCallMode() const {
  Code::Kind code_kind = info()->code_kind();
  return (code_kind == Code::WASM_FUNCTION ||
          code_kind == Code::WASM_TO_JS_FUNCTION)
             ? StubCallMode::kCallWasmRuntimeStub
             : StubCallMode::kCallOnHeapBuiltin;
}

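// Resolves the parallel moves attached to the gap positions of {instr},
// lowering each of them to an ordered sequence of moves and swaps via the
// gap resolver.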
void CodeGenerator::AssembleGaps(Instruction* instr) {
  for (int i = Instruction::FIRST_GAP_POSITION;
       i <= Instruction::LAST_GAP_POSITION; i++) {
    Instruction::GapPosition inner_pos =
        static_cast<Instruction::GapPosition>(i);
    ParallelMove* move = instr->GetParallelMove(inner_pos);
    if (move != nullptr) resolver()->Resolve(move);
  }
}

namespace {

Handle<PodArray<InliningPosition>> CreateInliningPositions(
    OptimizedCompilationInfo* info, Isolate* isolate) {
  const OptimizedCompilationInfo::InlinedFunctionList& inlined_functions =
      info->inlined_functions();
  if (inlined_functions.size() == 0) {
    return Handle<PodArray<InliningPosition>>::cast(
        isolate->factory()->empty_byte_array());
  }
  Handle<PodArray<InliningPosition>> inl_positions =
      PodArray<InliningPosition>::New(
          isolate, static_cast<int>(inlined_functions.size()), TENURED);
  for (size_t i = 0; i < inlined_functions.size(); ++i) {
    inl_positions->set(static_cast<int>(i), inlined_functions[i].position);
  }
  return inl_positions;
}

}  // namespace

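// Packs the accumulated translations, deoptimization literals, inlining
// positions and per-exit bytecode offsets / pc offsets into the
// DeoptimizationData object that is attached to the finished Code.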
Handle<DeoptimizationData> CodeGenerator::GenerateDeoptimizationData() {
  OptimizedCompilationInfo* info = this->info();
  int deopt_count = static_cast<int>(deoptimization_states_.size());
  if (deopt_count == 0 && !info->is_osr()) {
    return DeoptimizationData::Empty(isolate());
  }
  Handle<DeoptimizationData> data =
      DeoptimizationData::New(isolate(), deopt_count, TENURED);

  Handle<ByteArray> translation_array =
      translations_.CreateByteArray(isolate()->factory());

  data->SetTranslationByteArray(*translation_array);
  data->SetInlinedFunctionCount(
      Smi::FromInt(static_cast<int>(inlined_function_count_)));
  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));

  if (info->has_shared_info()) {
    data->SetSharedFunctionInfo(*info->shared_info());
  } else {
    data->SetSharedFunctionInfo(Smi::kZero);
  }

  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
      static_cast<int>(deoptimization_literals_.size()), TENURED);
  for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
    Handle<Object> object = deoptimization_literals_[i].Reify(isolate());
    literals->set(i, *object);
  }
  data->SetLiteralArray(*literals);

  Handle<PodArray<InliningPosition>> inl_pos =
      CreateInliningPositions(info, isolate());
  data->SetInliningPositions(*inl_pos);

  if (info->is_osr()) {
    DCHECK_LE(0, osr_pc_offset_);
    data->SetOsrBytecodeOffset(Smi::FromInt(info_->osr_offset().ToInt()));
    data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
  } else {
    BailoutId osr_offset = BailoutId::None();
    data->SetOsrBytecodeOffset(Smi::FromInt(osr_offset.ToInt()));
    data->SetOsrPcOffset(Smi::FromInt(-1));
  }

  // Populate deoptimization entries.
  for (int i = 0; i < deopt_count; i++) {
    DeoptimizationState* deoptimization_state = deoptimization_states_[i];
    CHECK(deoptimization_state);
    data->SetBytecodeOffset(i, deoptimization_state->bailout_id());
    data->SetTranslationIndex(
        i, Smi::FromInt(deoptimization_state->translation_id()));
    data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
  }

  return data;
}


Label* CodeGenerator::AddJumpTable(Label** targets, size_t target_count) {
  jump_tables_ = new (zone()) JumpTable(jump_tables_, targets, target_count);
  return jump_tables_->label();
}


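// Records the bookkeeping for a call site: the safepoint describing live
// references, the exception handler entry if the call can throw, and a lazy
// deoptimization entry (keyed by the pc right after the call) if the call
// carries a frame state.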
void CodeGenerator::RecordCallPosition(Instruction* instr) {
  CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));

  bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);

  RecordSafepoint(
      instr->reference_map(), Safepoint::kSimple, 0,
      needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);

  if (flags & CallDescriptor::kHasExceptionHandler) {
    InstructionOperandConverter i(this, instr);
    RpoNumber handler_rpo = i.InputRpo(instr->InputCount() - 1);
    handlers_.push_back({GetLabel(handler_rpo), tasm()->pc_offset()});
  }

  if (needs_frame_state) {
    MarkLazyDeoptSite();
    // If the frame state is present, it starts at argument 2 - after
    // the code address and the poison-alias index.
    size_t frame_state_offset = 2;
    FrameStateDescriptor* descriptor =
        GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
    int pc_offset = tasm()->pc_offset();
    int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
                                          descriptor->state_combine());

    DeoptimizationExit* const exit = new (zone())
        DeoptimizationExit(deopt_state_id, current_source_position_);
    deoptimization_exits_.push_back(exit);
    safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
  }
}

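// Returns the index of {literal} in the deoptimization literal array,
// reusing an existing entry when an equal literal has already been defined.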
int CodeGenerator::DefineDeoptimizationLiteral(DeoptimizationLiteral literal) {
  int result = static_cast<int>(deoptimization_literals_.size());
  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
    if (deoptimization_literals_[i] == literal) return i;
  }
  deoptimization_literals_.push_back(literal);
  return result;
}

DeoptimizationEntry const& CodeGenerator::GetDeoptimizationEntry(
    Instruction* instr, size_t frame_state_offset) {
  InstructionOperandConverter i(this, instr);
  int const state_id = i.InputInt32(frame_state_offset);
  return code()->GetDeoptimizationEntry(state_id);
}

DeoptimizeKind CodeGenerator::GetDeoptimizationKind(
    int deoptimization_id) const {
  size_t const index = static_cast<size_t>(deoptimization_id);
  DCHECK_LT(index, deoptimization_states_.size());
  return deoptimization_states_[index]->kind();
}

DeoptimizeReason CodeGenerator::GetDeoptimizationReason(
    int deoptimization_id) const {
  size_t const index = static_cast<size_t>(deoptimization_id);
  DCHECK_LT(index, deoptimization_states_.size());
  return deoptimization_states_[index]->reason();
}

void CodeGenerator::TranslateStateValueDescriptor(
    StateValueDescriptor* desc, StateValueList* nested,
    Translation* translation, InstructionOperandIterator* iter) {
  // Note:
  // If translation is null, we just skip the relevant instruction operands.
  if (desc->IsNested()) {
    if (translation != nullptr) {
      translation->BeginCapturedObject(static_cast<int>(nested->size()));
    }
    for (auto field : *nested) {
      TranslateStateValueDescriptor(field.desc, field.nested, translation,
                                    iter);
    }
  } else if (desc->IsArgumentsElements()) {
    if (translation != nullptr) {
      translation->ArgumentsElements(desc->arguments_type());
    }
  } else if (desc->IsArgumentsLength()) {
    if (translation != nullptr) {
      translation->ArgumentsLength(desc->arguments_type());
    }
  } else if (desc->IsDuplicate()) {
    if (translation != nullptr) {
      translation->DuplicateObject(static_cast<int>(desc->id()));
    }
  } else if (desc->IsPlain()) {
    InstructionOperand* op = iter->Advance();
    if (translation != nullptr) {
      AddTranslationForOperand(translation, iter->instruction(), op,
                               desc->type());
    }
  } else {
    DCHECK(desc->IsOptimizedOut());
    if (translation != nullptr) {
      if (optimized_out_literal_id_ == -1) {
        optimized_out_literal_id_ = DefineDeoptimizationLiteral(
            DeoptimizationLiteral(isolate()->factory()->optimized_out()));
      }
      translation->StoreLiteral(optimized_out_literal_id_);
    }
  }
}


void CodeGenerator::TranslateFrameStateDescriptorOperands(
    FrameStateDescriptor* desc, InstructionOperandIterator* iter,
    OutputFrameStateCombine combine, Translation* translation) {
  size_t index = 0;
  StateValueList* values = desc->GetStateValueDescriptors();
  for (StateValueList::iterator it = values->begin(); it != values->end();
       ++it, ++index) {
    StateValueDescriptor* value_desc = (*it).desc;
    if (!combine.IsOutputIgnored()) {
      // The result of the call should be placed at position
      // [index_from_top] in the stack (overwriting whatever was
      // previously there).
      size_t index_from_top = desc->GetSize() - 1 - combine.GetOffsetToPokeAt();
      if (index >= index_from_top &&
          index < index_from_top + iter->instruction()->OutputCount()) {
        DCHECK_NOT_NULL(translation);
        AddTranslationForOperand(
            translation, iter->instruction(),
            iter->instruction()->OutputAt(index - index_from_top),
            MachineType::AnyTagged());
        // Skip the instruction operands.
        TranslateStateValueDescriptor(value_desc, (*it).nested, nullptr, iter);
        continue;
      }
    }
    TranslateStateValueDescriptor(value_desc, (*it).nested, translation, iter);
  }
  DCHECK_EQ(desc->GetSize(), index);
}


void CodeGenerator::BuildTranslationForFrameStateDescriptor(
    FrameStateDescriptor* descriptor, InstructionOperandIterator* iter,
    Translation* translation, OutputFrameStateCombine state_combine) {
  // Outer-most state must be added to translation first.
  if (descriptor->outer_state() != nullptr) {
    BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), iter,
                                            translation,
                                            OutputFrameStateCombine::Ignore());
  }

  Handle<SharedFunctionInfo> shared_info;
  if (!descriptor->shared_info().ToHandle(&shared_info)) {
    if (!info()->has_shared_info()) {
      return;  // Stub with no SharedFunctionInfo.
    }
    shared_info = info()->shared_info();
  }
  int shared_info_id =
      DefineDeoptimizationLiteral(DeoptimizationLiteral(shared_info));

  switch (descriptor->type()) {
    case FrameStateType::kInterpretedFunction:
      translation->BeginInterpretedFrame(
          descriptor->bailout_id(), shared_info_id,
          static_cast<unsigned int>(descriptor->locals_count() + 1));
      break;
    case FrameStateType::kArgumentsAdaptor:
      translation->BeginArgumentsAdaptorFrame(
          shared_info_id,
          static_cast<unsigned int>(descriptor->parameters_count()));
      break;
    case FrameStateType::kConstructStub:
      DCHECK(descriptor->bailout_id().IsValidForConstructStub());
      translation->BeginConstructStubFrame(
          descriptor->bailout_id(), shared_info_id,
          static_cast<unsigned int>(descriptor->parameters_count()));
      break;
    case FrameStateType::kBuiltinContinuation: {
      BailoutId bailout_id = descriptor->bailout_id();
      int parameter_count =
          static_cast<unsigned int>(descriptor->parameters_count());
      translation->BeginBuiltinContinuationFrame(bailout_id, shared_info_id,
                                                 parameter_count);
      break;
    }
    case FrameStateType::kJavaScriptBuiltinContinuation: {
      BailoutId bailout_id = descriptor->bailout_id();
      int parameter_count =
          static_cast<unsigned int>(descriptor->parameters_count());
      translation->BeginJavaScriptBuiltinContinuationFrame(
          bailout_id, shared_info_id, parameter_count);
      break;
    }
    case FrameStateType::kJavaScriptBuiltinContinuationWithCatch: {
      BailoutId bailout_id = descriptor->bailout_id();
      int parameter_count =
          static_cast<unsigned int>(descriptor->parameters_count());
      translation->BeginJavaScriptBuiltinContinuationWithCatchFrame(
          bailout_id, shared_info_id, parameter_count);
      break;
    }
  }

  TranslateFrameStateDescriptorOperands(descriptor, iter, state_combine,
                                        translation);
}


int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
                                    size_t frame_state_offset,
                                    OutputFrameStateCombine state_combine) {
  DeoptimizationEntry const& entry =
      GetDeoptimizationEntry(instr, frame_state_offset);
  FrameStateDescriptor* const descriptor = entry.descriptor();
  frame_state_offset++;

  int update_feedback_count = entry.feedback().IsValid() ? 1 : 0;
  Translation translation(&translations_,
                          static_cast<int>(descriptor->GetFrameCount()),
                          static_cast<int>(descriptor->GetJSFrameCount()),
                          update_feedback_count, zone());
  if (entry.feedback().IsValid()) {
    DeoptimizationLiteral literal =
        DeoptimizationLiteral(entry.feedback().vector());
    int literal_id = DefineDeoptimizationLiteral(literal);
    translation.AddUpdateFeedback(literal_id, entry.feedback().slot().ToInt());
  }
  InstructionOperandIterator iter(instr, frame_state_offset);
  BuildTranslationForFrameStateDescriptor(descriptor, &iter, &translation,
                                          state_combine);

  int deoptimization_id = static_cast<int>(deoptimization_states_.size());

  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
      descriptor->bailout_id(), translation.index(), pc_offset, entry.kind(),
      entry.reason()));

  return deoptimization_id;
}

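// Emits one translation entry for {op}, dispatching on the operand kind
// (stack slot, FP stack slot, register, FP register, or immediate constant)
// and on the machine type recorded in the frame state; constants are turned
// into deoptimization literals.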
void CodeGenerator::AddTranslationForOperand(Translation* translation,
                                             Instruction* instr,
                                             InstructionOperand* op,
                                             MachineType type) {
  if (op->IsStackSlot()) {
    if (type.representation() == MachineRepresentation::kBit) {
      translation->StoreBoolStackSlot(LocationOperand::cast(op)->index());
    } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
               type == MachineType::Int32()) {
      translation->StoreInt32StackSlot(LocationOperand::cast(op)->index());
    } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
               type == MachineType::Uint32()) {
      translation->StoreUint32StackSlot(LocationOperand::cast(op)->index());
    } else {
      CHECK_EQ(MachineRepresentation::kTagged, type.representation());
      translation->StoreStackSlot(LocationOperand::cast(op)->index());
    }
  } else if (op->IsFPStackSlot()) {
    if (type.representation() == MachineRepresentation::kFloat64) {
      translation->StoreDoubleStackSlot(LocationOperand::cast(op)->index());
    } else {
      CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
      translation->StoreFloatStackSlot(LocationOperand::cast(op)->index());
    }
  } else if (op->IsRegister()) {
    InstructionOperandConverter converter(this, instr);
    if (type.representation() == MachineRepresentation::kBit) {
      translation->StoreBoolRegister(converter.ToRegister(op));
    } else if (type == MachineType::Int8() || type == MachineType::Int16() ||
               type == MachineType::Int32()) {
      translation->StoreInt32Register(converter.ToRegister(op));
    } else if (type == MachineType::Uint8() || type == MachineType::Uint16() ||
               type == MachineType::Uint32()) {
      translation->StoreUint32Register(converter.ToRegister(op));
    } else {
      CHECK_EQ(MachineRepresentation::kTagged, type.representation());
      translation->StoreRegister(converter.ToRegister(op));
    }
  } else if (op->IsFPRegister()) {
    InstructionOperandConverter converter(this, instr);
    if (type.representation() == MachineRepresentation::kFloat64) {
      translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
    } else {
      CHECK_EQ(MachineRepresentation::kFloat32, type.representation());
      translation->StoreFloatRegister(converter.ToFloatRegister(op));
    }
  } else {
    CHECK(op->IsImmediate());
    InstructionOperandConverter converter(this, instr);
    Constant constant = converter.ToConstant(op);
    DeoptimizationLiteral literal;
    switch (constant.type()) {
      case Constant::kInt32:
        if (type.representation() == MachineRepresentation::kTagged) {
          // When pointers are 4 bytes, we can use int32 constants to
          // represent Smis.
          DCHECK_EQ(4, kPointerSize);
          Smi* smi = reinterpret_cast<Smi*>(constant.ToInt32());
          DCHECK(smi->IsSmi());
          literal = DeoptimizationLiteral(smi->value());
        } else if (type.representation() == MachineRepresentation::kBit) {
          if (constant.ToInt32() == 0) {
            literal =
                DeoptimizationLiteral(isolate()->factory()->false_value());
          } else {
            DCHECK_EQ(1, constant.ToInt32());
            literal = DeoptimizationLiteral(isolate()->factory()->true_value());
          }
        } else {
          DCHECK(type == MachineType::Int32() ||
                 type == MachineType::Uint32() ||
                 type.representation() == MachineRepresentation::kWord32 ||
                 type.representation() == MachineRepresentation::kNone);
          DCHECK(type.representation() != MachineRepresentation::kNone ||
                 constant.ToInt32() == FrameStateDescriptor::kImpossibleValue);
          if (type == MachineType::Uint32()) {
            literal = DeoptimizationLiteral(
                static_cast<uint32_t>(constant.ToInt32()));
          } else {
            literal = DeoptimizationLiteral(constant.ToInt32());
          }
        }
        break;
      case Constant::kInt64:
        // When pointers are 8 bytes, we can use int64 constants to represent
        // Smis.
        DCHECK(type.representation() == MachineRepresentation::kWord64 ||
               type.representation() == MachineRepresentation::kTagged);
        DCHECK_EQ(8, kPointerSize);
        {
          Smi* smi = reinterpret_cast<Smi*>(constant.ToInt64());
          DCHECK(smi->IsSmi());
          literal = DeoptimizationLiteral(smi->value());
        }
        break;
      case Constant::kFloat32:
        DCHECK(type.representation() == MachineRepresentation::kFloat32 ||
               type.representation() == MachineRepresentation::kTagged);
        literal = DeoptimizationLiteral(constant.ToFloat32());
        break;
      case Constant::kFloat64:
        DCHECK(type.representation() == MachineRepresentation::kFloat64 ||
               type.representation() == MachineRepresentation::kTagged);
        literal = DeoptimizationLiteral(constant.ToFloat64().value());
        break;
      case Constant::kHeapObject:
        DCHECK_EQ(MachineRepresentation::kTagged, type.representation());
        literal = DeoptimizationLiteral(constant.ToHeapObject());
        break;
      default:
        UNREACHABLE();
    }
    if (literal.object().equals(info()->closure())) {
      translation->StoreJSFrameFunction();
    } else {
      int literal_id = DefineDeoptimizationLiteral(literal);
      translation->StoreLiteral(literal_id);
    }
  }
}

void CodeGenerator::MarkLazyDeoptSite() {
  last_lazy_deopt_pc_ = tasm()->pc_offset();
}

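// Builds the deoptimization translation for {instr} (the trampoline pc is
// not known yet, hence the -1) and registers a new exit whose label is bound
// later when the deoptimization exits are assembled at the end of the code.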
DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
    Instruction* instr, size_t frame_state_offset) {
  int const deoptimization_id = BuildTranslation(
      instr, -1, frame_state_offset, OutputFrameStateCombine::Ignore());

  DeoptimizationExit* const exit = new (zone())
      DeoptimizationExit(deoptimization_id, current_source_position_);
  deoptimization_exits_.push_back(exit);
  return exit;
}

void CodeGenerator::InitializeSpeculationPoison() {
  if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;

  // Initialize {kSpeculationPoisonRegister} either by comparing the expected
  // with the actual call target, or by unconditionally using {-1} initially.
  // Masking register arguments with it only makes sense in the first case.
  if (info()->called_with_code_start_register()) {
    tasm()->RecordComment("-- Prologue: generate speculation poison --");
    GenerateSpeculationPoisonFromCodeStartRegister();
    if (info()->is_poisoning_register_arguments()) {
      AssembleRegisterArgumentPoisoning();
    }
  } else {
    ResetSpeculationPoison();
  }
}

void CodeGenerator::ResetSpeculationPoison() {
  if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
    tasm()->ResetSpeculationPoisonRegister();
  }
}

OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
    : frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
  gen->ools_ = this;
}

OutOfLineCode::~OutOfLineCode() {}

Handle<Object> DeoptimizationLiteral::Reify(Isolate* isolate) const {
  return object_.is_null() ? isolate->factory()->NewNumber(number_) : object_;
}

}  // namespace compiler
}  // namespace internal
}  // namespace v8