1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <cstdint>
6 #if V8_TARGET_ARCH_X64
7
8 #include "src/base/bits.h"
9 #include "src/base/division-by-constant.h"
10 #include "src/base/utils/random-number-generator.h"
11 #include "src/codegen/callable.h"
12 #include "src/codegen/code-factory.h"
13 #include "src/codegen/cpu-features.h"
14 #include "src/codegen/external-reference-table.h"
15 #include "src/codegen/interface-descriptors-inl.h"
16 #include "src/codegen/macro-assembler.h"
17 #include "src/codegen/register-configuration.h"
18 #include "src/codegen/string-constants.h"
19 #include "src/codegen/x64/assembler-x64.h"
20 #include "src/codegen/x64/register-x64.h"
21 #include "src/common/globals.h"
22 #include "src/debug/debug.h"
23 #include "src/deoptimizer/deoptimizer.h"
24 #include "src/execution/frames-inl.h"
25 #include "src/heap/memory-chunk.h"
26 #include "src/init/bootstrapper.h"
27 #include "src/logging/counters.h"
28 #include "src/objects/objects-inl.h"
29 #include "src/objects/smi.h"
30 #include "src/sandbox/external-pointer.h"
31 #include "src/snapshot/snapshot.h"
32
33 // Satisfy cpplint check, but don't include platform-specific header. It is
34 // included recursively via macro-assembler.h.
35 #if 0
36 #include "src/codegen/x64/macro-assembler-x64.h"
37 #endif
38
39 namespace v8 {
40 namespace internal {
41
42 Operand StackArgumentsAccessor::GetArgumentOperand(int index) const {
43 DCHECK_GE(index, 0);
44 // arg[0] = rsp + kPCOnStackSize;
45 // arg[i] = arg[0] + i * kSystemPointerSize;
46 return Operand(rsp, kPCOnStackSize + index * kSystemPointerSize);
47 }
48
49 void MacroAssembler::Load(Register destination, ExternalReference source) {
50 if (root_array_available_ && options().enable_root_relative_access) {
51 intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
52 if (is_int32(delta)) {
53 movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
54 return;
55 }
56 }
57 // Safe code.
58 if (destination == rax && !options().isolate_independent_code) {
59 load_rax(source);
60 } else {
61 movq(destination, ExternalReferenceAsOperand(source));
62 }
63 }
64
65 void MacroAssembler::Store(ExternalReference destination, Register source) {
66 if (root_array_available_ && options().enable_root_relative_access) {
67 intptr_t delta =
68 RootRegisterOffsetForExternalReference(isolate(), destination);
69 if (is_int32(delta)) {
70 movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
71 return;
72 }
73 }
74 // Safe code.
75 if (source == rax && !options().isolate_independent_code) {
76 store_rax(destination);
77 } else {
78 movq(ExternalReferenceAsOperand(destination), source);
79 }
80 }
81
82 void TurboAssembler::LoadFromConstantsTable(Register destination,
83 int constant_index) {
84 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
85 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
86 LoadTaggedPointerField(
87 destination,
88 FieldOperand(destination, FixedArray::OffsetOfElementAt(constant_index)));
89 }
90
91 void TurboAssembler::LoadRootRegisterOffset(Register destination,
92 intptr_t offset) {
93 DCHECK(is_int32(offset));
94 if (offset == 0) {
95 Move(destination, kRootRegister);
96 } else {
97 leaq(destination, Operand(kRootRegister, static_cast<int32_t>(offset)));
98 }
99 }
100
101 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
102 movq(destination, Operand(kRootRegister, offset));
103 }
104
105 void TurboAssembler::LoadAddress(Register destination,
106 ExternalReference source) {
107 if (root_array_available_ && options().enable_root_relative_access) {
108 intptr_t delta = RootRegisterOffsetForExternalReference(isolate(), source);
109 if (is_int32(delta)) {
110 leaq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
111 return;
112 }
113 }
114 // Safe code.
115 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
116 // non-isolate-independent code. In many cases it might be cheaper than
117 // embedding the relocatable value.
118 if (root_array_available_ && options().isolate_independent_code) {
119 IndirectLoadExternalReference(destination, source);
120 return;
121 }
122 Move(destination, source);
123 }
124
125 Operand TurboAssembler::ExternalReferenceAsOperand(ExternalReference reference,
126 Register scratch) {
127 if (root_array_available_ && options().enable_root_relative_access) {
128 int64_t delta =
129 RootRegisterOffsetForExternalReference(isolate(), reference);
130 if (is_int32(delta)) {
131 return Operand(kRootRegister, static_cast<int32_t>(delta));
132 }
133 }
134 if (root_array_available_ && options().isolate_independent_code) {
135 if (IsAddressableThroughRootRegister(isolate(), reference)) {
136 // Some external references can be efficiently loaded as an offset from
137 // kRootRegister.
138 intptr_t offset =
139 RootRegisterOffsetForExternalReference(isolate(), reference);
140 CHECK(is_int32(offset));
141 return Operand(kRootRegister, static_cast<int32_t>(offset));
142 } else {
143 // Otherwise, do a memory load from the external reference table.
144 movq(scratch, Operand(kRootRegister,
145 RootRegisterOffsetForExternalReferenceTableEntry(
146 isolate(), reference)));
147 return Operand(scratch, 0);
148 }
149 }
150 Move(scratch, reference);
151 return Operand(scratch, 0);
152 }
153
154 void MacroAssembler::PushAddress(ExternalReference source) {
155 LoadAddress(kScratchRegister, source);
156 Push(kScratchRegister);
157 }
158
159 Operand TurboAssembler::RootAsOperand(RootIndex index) {
160 DCHECK(root_array_available());
161 return Operand(kRootRegister, RootRegisterOffsetForRootIndex(index));
162 }
163
164 void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
165 DCHECK(root_array_available_);
166 movq(destination, RootAsOperand(index));
167 }
168
169 void MacroAssembler::PushRoot(RootIndex index) {
170 DCHECK(root_array_available_);
171 Push(RootAsOperand(index));
172 }
173
174 void TurboAssembler::CompareRoot(Register with, RootIndex index) {
175 DCHECK(root_array_available_);
176 if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
177 RootIndex::kLastStrongOrReadOnlyRoot)) {
178 cmp_tagged(with, RootAsOperand(index));
179 } else {
180 // Some smi roots contain system pointer size values like stack limits.
181 cmpq(with, RootAsOperand(index));
182 }
183 }
184
185 void TurboAssembler::CompareRoot(Operand with, RootIndex index) {
186 DCHECK(root_array_available_);
187 DCHECK(!with.AddressUsesRegister(kScratchRegister));
188 LoadRoot(kScratchRegister, index);
189 if (base::IsInRange(index, RootIndex::kFirstStrongOrReadOnlyRoot,
190 RootIndex::kLastStrongOrReadOnlyRoot)) {
191 cmp_tagged(with, kScratchRegister);
192 } else {
193 // Some smi roots contain system pointer size values like stack limits.
194 cmpq(with, kScratchRegister);
195 }
196 }
197
198 void TurboAssembler::LoadMap(Register destination, Register object) {
199 LoadTaggedPointerField(destination,
200 FieldOperand(object, HeapObject::kMapOffset));
201 #ifdef V8_MAP_PACKING
202 UnpackMapWord(destination);
203 #endif
204 }
205
206 void TurboAssembler::LoadTaggedPointerField(Register destination,
207 Operand field_operand) {
208 if (COMPRESS_POINTERS_BOOL) {
209 DecompressTaggedPointer(destination, field_operand);
210 } else {
211 mov_tagged(destination, field_operand);
212 }
213 }
214
215 #ifdef V8_MAP_PACKING
216 void TurboAssembler::UnpackMapWord(Register r) {
217 // Clear the top two bytes (which may include metadata). Must be in sync with
218 // MapWord::Unpack, and vice versa.
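// Concretely: shifting left then right by 16 zeroes bits 63..48 (the two
// metadata bytes), and the xor with kMapWordXorMask then reverses the
// packing applied to the remaining bits.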
219 shlq(r, Immediate(16));
220 shrq(r, Immediate(16));
221 xorq(r, Immediate(Internals::kMapWordXorMask));
222 }
223 #endif
224
225 void TurboAssembler::LoadTaggedSignedField(Register destination,
226 Operand field_operand) {
227 if (COMPRESS_POINTERS_BOOL) {
228 DecompressTaggedSigned(destination, field_operand);
229 } else {
230 mov_tagged(destination, field_operand);
231 }
232 }
233
234 void TurboAssembler::LoadAnyTaggedField(Register destination,
235 Operand field_operand) {
236 if (COMPRESS_POINTERS_BOOL) {
237 DecompressAnyTagged(destination, field_operand);
238 } else {
239 mov_tagged(destination, field_operand);
240 }
241 }
242
243 void TurboAssembler::PushTaggedPointerField(Operand field_operand,
244 Register scratch) {
245 if (COMPRESS_POINTERS_BOOL) {
246 DCHECK(!field_operand.AddressUsesRegister(scratch));
247 DecompressTaggedPointer(scratch, field_operand);
248 Push(scratch);
249 } else {
250 Push(field_operand);
251 }
252 }
253
254 void TurboAssembler::PushTaggedAnyField(Operand field_operand,
255 Register scratch) {
256 if (COMPRESS_POINTERS_BOOL) {
257 DCHECK(!field_operand.AddressUsesRegister(scratch));
258 DecompressAnyTagged(scratch, field_operand);
259 Push(scratch);
260 } else {
261 Push(field_operand);
262 }
263 }
264
265 void TurboAssembler::SmiUntagField(Register dst, Operand src) {
266 SmiUntag(dst, src);
267 }
268
269 void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
270 Immediate value) {
271 if (COMPRESS_POINTERS_BOOL) {
272 movl(dst_field_operand, value);
273 } else {
274 movq(dst_field_operand, value);
275 }
276 }
277
278 void TurboAssembler::StoreTaggedField(Operand dst_field_operand,
279 Register value) {
280 if (COMPRESS_POINTERS_BOOL) {
281 movl(dst_field_operand, value);
282 } else {
283 movq(dst_field_operand, value);
284 }
285 }
286
287 void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
288 Smi value) {
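// With 32-bit Smis the payload sits in the upper half of the 64-bit word, so
// the value generally cannot be encoded as a sign-extended 32-bit immediate;
// materialize it in the scratch register first.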
289 if (SmiValuesAre32Bits()) {
290 Move(kScratchRegister, value);
291 movq(dst_field_operand, kScratchRegister);
292 } else {
293 StoreTaggedField(dst_field_operand, Immediate(value));
294 }
295 }
296
297 void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
298 Register value) {
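// An xchg with a memory operand carries an implicit lock prefix, so this acts
// as a sequentially consistent store of the tagged value.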
299 if (COMPRESS_POINTERS_BOOL) {
300 movl(kScratchRegister, value);
301 xchgl(kScratchRegister, dst_field_operand);
302 } else {
303 movq(kScratchRegister, value);
304 xchgq(kScratchRegister, dst_field_operand);
305 }
306 }
307
308 void TurboAssembler::DecompressTaggedSigned(Register destination,
309 Operand field_operand) {
310 ASM_CODE_COMMENT(this);
311 movl(destination, field_operand);
312 }
313
314 void TurboAssembler::DecompressTaggedPointer(Register destination,
315 Operand field_operand) {
316 ASM_CODE_COMMENT(this);
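// The compressed value is a 32-bit offset from the pointer-compression cage
// base: zero-extend it and add the cage base to recover the full pointer.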
317 movl(destination, field_operand);
318 addq(destination, kPtrComprCageBaseRegister);
319 }
320
321 void TurboAssembler::DecompressTaggedPointer(Register destination,
322 Register source) {
323 ASM_CODE_COMMENT(this);
324 movl(destination, source);
325 addq(destination, kPtrComprCageBaseRegister);
326 }
327
328 void TurboAssembler::DecompressAnyTagged(Register destination,
329 Operand field_operand) {
330 ASM_CODE_COMMENT(this);
331 movl(destination, field_operand);
332 addq(destination, kPtrComprCageBaseRegister);
333 }
334
335 void MacroAssembler::RecordWriteField(Register object, int offset,
336 Register value, Register slot_address,
337 SaveFPRegsMode save_fp,
338 RememberedSetAction remembered_set_action,
339 SmiCheck smi_check) {
340 ASM_CODE_COMMENT(this);
341 DCHECK(!AreAliased(object, value, slot_address));
342 // First, check if a write barrier is even needed. The tests below
343 // catch stores of Smis.
344 Label done;
345
346 // Skip barrier if writing a smi.
347 if (smi_check == SmiCheck::kInline) {
348 JumpIfSmi(value, &done);
349 }
350
351 // Although the object register is tagged, the offset is relative to the start
352 // of the object, so the offset must be a multiple of kTaggedSize.
353 DCHECK(IsAligned(offset, kTaggedSize));
354
355 leaq(slot_address, FieldOperand(object, offset));
356 if (FLAG_debug_code) {
357 ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
358 Label ok;
359 testb(slot_address, Immediate(kTaggedSize - 1));
360 j(zero, &ok, Label::kNear);
361 int3();
362 bind(&ok);
363 }
364
365 RecordWrite(object, slot_address, value, save_fp, remembered_set_action,
366 SmiCheck::kOmit);
367
368 bind(&done);
369
370 // Clobber clobbered input registers when running with the debug-code flag
371 // turned on to provoke errors.
372 if (FLAG_debug_code) {
373 ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
374 Move(value, kZapValue, RelocInfo::NO_INFO);
375 Move(slot_address, kZapValue, RelocInfo::NO_INFO);
376 }
377 }
378
379 void TurboAssembler::EncodeSandboxedPointer(Register value) {
380 ASM_CODE_COMMENT(this);
381 #ifdef V8_SANDBOXED_POINTERS
382 subq(value, kPtrComprCageBaseRegister);
383 shlq(value, Immediate(kSandboxedPointerShift));
384 #else
385 UNREACHABLE();
386 #endif
387 }
388
389 void TurboAssembler::DecodeSandboxedPointer(Register value) {
390 ASM_CODE_COMMENT(this);
391 #ifdef V8_SANDBOXED_POINTERS
392 shrq(value, Immediate(kSandboxedPointerShift));
393 addq(value, kPtrComprCageBaseRegister);
394 #else
395 UNREACHABLE();
396 #endif
397 }
398
399 void TurboAssembler::LoadSandboxedPointerField(Register destination,
400 Operand field_operand) {
401 ASM_CODE_COMMENT(this);
402 movq(destination, field_operand);
403 DecodeSandboxedPointer(destination);
404 }
405
406 void TurboAssembler::StoreSandboxedPointerField(Operand dst_field_operand,
407 Register value) {
408 ASM_CODE_COMMENT(this);
409 DCHECK(!AreAliased(value, kScratchRegister));
410 DCHECK(!dst_field_operand.AddressUsesRegister(kScratchRegister));
411 movq(kScratchRegister, value);
412 EncodeSandboxedPointer(kScratchRegister);
413 movq(dst_field_operand, kScratchRegister);
414 }
415
416 void TurboAssembler::LoadExternalPointerField(
417 Register destination, Operand field_operand, ExternalPointerTag tag,
418 Register scratch, IsolateRootLocation isolateRootLocation) {
419 DCHECK(!AreAliased(destination, scratch));
420 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
421 DCHECK_NE(kExternalPointerNullTag, tag);
422 DCHECK(!field_operand.AddressUsesRegister(scratch));
423 if (isolateRootLocation == IsolateRootLocation::kInRootRegister) {
424 DCHECK(root_array_available_);
425 movq(scratch, Operand(kRootRegister,
426 IsolateData::external_pointer_table_offset() +
427 Internals::kExternalPointerTableBufferOffset));
428 } else {
429 DCHECK(isolateRootLocation == IsolateRootLocation::kInScratchRegister);
430 movq(scratch,
431 Operand(scratch, IsolateData::external_pointer_table_offset() +
432 Internals::kExternalPointerTableBufferOffset));
433 }
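// The field stores an index into the external pointer table: shift the index
// down, load the 64-bit table entry, and mask off the type tag bits.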
434 movl(destination, field_operand);
435 shrq(destination, Immediate(kExternalPointerIndexShift));
436 movq(destination, Operand(scratch, destination, times_8, 0));
437 movq(scratch, Immediate64(~tag));
438 andq(destination, scratch);
439 #else
440 movq(destination, field_operand);
441 #endif // V8_SANDBOXED_EXTERNAL_POINTERS
442 }
443
444 void TurboAssembler::MaybeSaveRegisters(RegList registers) {
445 for (Register reg : registers) {
446 pushq(reg);
447 }
448 }
449
450 void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
451 for (Register reg : base::Reversed(registers)) {
452 popq(reg);
453 }
454 }
455
456 void TurboAssembler::CallEphemeronKeyBarrier(Register object,
457 Register slot_address,
458 SaveFPRegsMode fp_mode) {
459 ASM_CODE_COMMENT(this);
460 DCHECK(!AreAliased(object, slot_address));
461 RegList registers =
462 WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
463 MaybeSaveRegisters(registers);
464
465 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
466 Register slot_address_parameter =
467 WriteBarrierDescriptor::SlotAddressRegister();
468 MovePair(slot_address_parameter, slot_address, object_parameter, object);
469
470 Call(isolate()->builtins()->code_handle(
471 Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
472 RelocInfo::CODE_TARGET);
473 MaybeRestoreRegisters(registers);
474 }
475
476 void TurboAssembler::CallRecordWriteStubSaveRegisters(
477 Register object, Register slot_address,
478 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
479 StubCallMode mode) {
480 ASM_CODE_COMMENT(this);
481 DCHECK(!AreAliased(object, slot_address));
482 RegList registers =
483 WriteBarrierDescriptor::ComputeSavedRegisters(object, slot_address);
484 MaybeSaveRegisters(registers);
485 Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
486 Register slot_address_parameter =
487 WriteBarrierDescriptor::SlotAddressRegister();
488 MovePair(object_parameter, object, slot_address_parameter, slot_address);
489
490 CallRecordWriteStub(object_parameter, slot_address_parameter,
491 remembered_set_action, fp_mode, mode);
492 MaybeRestoreRegisters(registers);
493 }
494
495 void TurboAssembler::CallRecordWriteStub(
496 Register object, Register slot_address,
497 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
498 StubCallMode mode) {
499 ASM_CODE_COMMENT(this);
500 // Use CallRecordWriteStubSaveRegisters if the object and slot registers
501 // need to be caller saved.
502 DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
503 DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
504 #if V8_ENABLE_WEBASSEMBLY
505 if (mode == StubCallMode::kCallWasmRuntimeStub) {
506 // Use {near_call} for direct Wasm call within a module.
507 auto wasm_target =
508 wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
509 near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
510 #else
511 if (false) {
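// Dummy branch so that the '} else {' below stays well-formed when
// WebAssembly support is compiled out.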
512 #endif
513 } else {
514 Builtin builtin =
515 Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
516 if (options().inline_offheap_trampolines) {
517 CallBuiltin(builtin);
518 } else {
519 Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
520 Call(code_target, RelocInfo::CODE_TARGET);
521 }
522 }
523 }
524
525 #ifdef V8_IS_TSAN
526 void TurboAssembler::CallTSANStoreStub(Register address, Register value,
527 SaveFPRegsMode fp_mode, int size,
528 StubCallMode mode,
529 std::memory_order order) {
530 ASM_CODE_COMMENT(this);
531 DCHECK(!AreAliased(address, value));
532 TSANStoreDescriptor descriptor;
533 RegList registers = descriptor.allocatable_registers();
534
535 MaybeSaveRegisters(registers);
536
537 Register address_parameter(
538 descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
539 Register value_parameter(
540 descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
541
542 // Prepare argument registers for calling GetTSANStoreStub.
543 MovePair(address_parameter, address, value_parameter, value);
544
545 if (isolate()) {
546 Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
547 Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
548 Call(code_target, RelocInfo::CODE_TARGET);
549 }
550 #if V8_ENABLE_WEBASSEMBLY
551 // There are two different kinds of wasm-to-js functions: one lives in the
552 // wasm code space, and another one lives on the heap. Both of them have the
553 // same CodeKind (WASM_TO_JS_FUNCTION), but depending on where they are they
554 // have to either use the wasm stub calls, or call the builtin using the
555 // isolate like JS does. In order to know which wasm-to-js function we are
556 // compiling right now, we check if the isolate is null.
557 // TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
558 // different CodeKinds and pass the CodeKind as a parameter so that we can use
559 // that instead of a nullptr check.
560 // NOLINTNEXTLINE(readability/braces)
561 else {
562 DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
563 // Use {near_call} for direct Wasm call within a module.
564 auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
565 near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
566 }
567 #endif // V8_ENABLE_WEBASSEMBLY
568
569 MaybeRestoreRegisters(registers);
570 }
571
572 void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
573 SaveFPRegsMode fp_mode, int size,
574 StubCallMode mode) {
575 TSANLoadDescriptor descriptor;
576 RegList registers = descriptor.allocatable_registers();
577
578 MaybeSaveRegisters(registers);
579
580 Register address_parameter(
581 descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
582
583 // Prepare argument registers for calling TSANRelaxedLoad.
584 Move(address_parameter, address);
585
586 if (isolate()) {
587 Builtin builtin = CodeFactory::GetTSANRelaxedLoadStub(fp_mode, size);
588 Handle<CodeT> code_target = isolate()->builtins()->code_handle(builtin);
589 Call(code_target, RelocInfo::CODE_TARGET);
590 }
591 #if V8_ENABLE_WEBASSEMBLY
592 // There are two different kinds of wasm-to-js functions: one lives in the
593 // wasm code space, and another one lives on the heap. Both of them have the
594 // same CodeKind (WASM_TO_JS_FUNCTION), but depending on where they are they
595 // have to either use the wasm stub calls, or call the builtin using the
596 // isolate like JS does. In order to know which wasm-to-js function we are
597 // compiling right now, we check if the isolate is null.
598 // TODO(solanes, v8:11600): Split CodeKind::WASM_TO_JS_FUNCTION into two
599 // different CodeKinds and pass the CodeKind as a parameter so that we can use
600 // that instead of a nullptr check.
601 // NOLINTNEXTLINE(readability/braces)
602 else {
603 DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
604 // Use {near_call} for direct Wasm call within a module.
605 auto wasm_target = wasm::WasmCode::GetTSANRelaxedLoadStub(fp_mode, size);
606 near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
607 }
608 #endif // V8_ENABLE_WEBASSEMBLY
609
610 MaybeRestoreRegisters(registers);
611 }
612 #endif // V8_IS_TSAN
613
614 void MacroAssembler::RecordWrite(Register object, Register slot_address,
615 Register value, SaveFPRegsMode fp_mode,
616 RememberedSetAction remembered_set_action,
617 SmiCheck smi_check) {
618 ASM_CODE_COMMENT(this);
619 DCHECK(!AreAliased(object, slot_address, value));
620 AssertNotSmi(object);
621
622 if ((remembered_set_action == RememberedSetAction::kOmit &&
623 !FLAG_incremental_marking) ||
624 FLAG_disable_write_barriers) {
625 return;
626 }
627
628 if (FLAG_debug_code) {
629 ASM_CODE_COMMENT_STRING(this, "Debug check slot_address");
630 Label ok;
631 cmp_tagged(value, Operand(slot_address, 0));
632 j(equal, &ok, Label::kNear);
633 int3();
634 bind(&ok);
635 }
636
637 // First, check if a write barrier is even needed. The tests below
638 // catch stores of smis and stores into the young generation.
639 Label done;
640
641 if (smi_check == SmiCheck::kInline) {
642 // Skip barrier if writing a smi.
643 JumpIfSmi(value, &done);
644 }
645
646 CheckPageFlag(value,
647 value, // Used as scratch.
648 MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
649 Label::kNear);
650
651 CheckPageFlag(object,
652 value, // Used as scratch.
653 MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
654 Label::kNear);
655
656 CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
657
658 bind(&done);
659
660 // Clobber clobbered registers when running with the debug-code flag
661 // turned on to provoke errors.
662 if (FLAG_debug_code) {
663 ASM_CODE_COMMENT_STRING(this, "Zap scratch registers");
664 Move(slot_address, kZapValue, RelocInfo::NO_INFO);
665 Move(value, kZapValue, RelocInfo::NO_INFO);
666 }
667 }
668
669 void TurboAssembler::Assert(Condition cc, AbortReason reason) {
670 if (FLAG_debug_code) Check(cc, reason);
671 }
672
673 void TurboAssembler::AssertUnreachable(AbortReason reason) {
674 if (FLAG_debug_code) Abort(reason);
675 }
676
677 void TurboAssembler::Check(Condition cc, AbortReason reason) {
678 Label L;
679 j(cc, &L, Label::kNear);
680 Abort(reason);
681 // Control will not return here.
682 bind(&L);
683 }
684
685 void TurboAssembler::CheckStackAlignment() {
686 int frame_alignment = base::OS::ActivationFrameAlignment();
687 int frame_alignment_mask = frame_alignment - 1;
688 if (frame_alignment > kSystemPointerSize) {
689 ASM_CODE_COMMENT(this);
690 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
691 Label alignment_as_expected;
692 testq(rsp, Immediate(frame_alignment_mask));
693 j(zero, &alignment_as_expected, Label::kNear);
694 // Abort if stack is not aligned.
695 int3();
696 bind(&alignment_as_expected);
697 }
698 }
699
700 void TurboAssembler::Abort(AbortReason reason) {
701 ASM_CODE_COMMENT(this);
702 if (FLAG_code_comments) {
703 const char* msg = GetAbortReason(reason);
704 RecordComment("Abort message: ");
705 RecordComment(msg);
706 }
707
708 // Avoid emitting call to builtin if requested.
709 if (trap_on_abort()) {
710 int3();
711 return;
712 }
713
714 if (should_abort_hard()) {
715 // We don't care if we constructed a frame. Just pretend we did.
716 FrameScope assume_frame(this, StackFrame::NO_FRAME_TYPE);
717 Move(arg_reg_1, static_cast<int>(reason));
718 PrepareCallCFunction(1);
719 LoadAddress(rax, ExternalReference::abort_with_reason());
720 call(rax);
721 return;
722 }
723
724 Move(rdx, Smi::FromInt(static_cast<int>(reason)));
725
726 if (!has_frame()) {
727 // We don't actually want to generate a pile of code for this, so just
728 // claim there is a stack frame, without generating one.
729 FrameScope scope(this, StackFrame::NO_FRAME_TYPE);
730 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
731 } else {
732 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
733 }
734 // Control will not return here.
735 int3();
736 }
737
738 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
739 SaveFPRegsMode save_doubles) {
740 ASM_CODE_COMMENT(this);
741 // If the expected number of arguments of the runtime function is
742 // constant, we check that the actual number of arguments match the
743 // expectation.
744 CHECK(f->nargs < 0 || f->nargs == num_arguments);
745
746 // TODO(1236192): Most runtime routines don't need the number of
747 // arguments passed in because it is constant. At some point we
748 // should remove this need and make the runtime routine entry code
749 // smarter.
750 Move(rax, num_arguments);
751 LoadAddress(rbx, ExternalReference::Create(f));
752 Handle<CodeT> code =
753 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
754 Call(code, RelocInfo::CODE_TARGET);
755 }
756
757 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
758 // ----------- S t a t e -------------
759 // -- rsp[0] : return address
760 // -- rsp[8] : argument num_arguments - 1
761 // ...
762 // -- rsp[8 * num_arguments] : argument 0 (receiver)
763 //
764 // For runtime functions with variable arguments:
765 // -- rax : number of arguments
766 // -----------------------------------
767 ASM_CODE_COMMENT(this);
768 const Runtime::Function* function = Runtime::FunctionForId(fid);
769 DCHECK_EQ(1, function->result_size);
770 if (function->nargs >= 0) {
771 Move(rax, function->nargs);
772 }
773 JumpToExternalReference(ExternalReference::Create(fid));
774 }
775
776 void MacroAssembler::JumpToExternalReference(const ExternalReference& ext,
777 bool builtin_exit_frame) {
778 ASM_CODE_COMMENT(this);
779 // Set the entry point and jump to the C entry runtime stub.
780 LoadAddress(rbx, ext);
781 Handle<CodeT> code =
782 CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
783 ArgvMode::kStack, builtin_exit_frame);
784 Jump(code, RelocInfo::CODE_TARGET);
785 }
786
787 static constexpr Register saved_regs[] = {rax, rcx, rdx, rbx, rbp, rsi,
788 rdi, r8, r9, r10, r11};
789
790 static constexpr int kNumberOfSavedRegs = sizeof(saved_regs) / sizeof(Register);
791
792 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
793 Register exclusion1,
794 Register exclusion2,
795 Register exclusion3) const {
796 int bytes = 0;
797 for (int i = 0; i < kNumberOfSavedRegs; i++) {
798 Register reg = saved_regs[i];
799 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
800 bytes += kSystemPointerSize;
801 }
802 }
803
804 // R12 to r15 are callee save on all platforms.
805 if (fp_mode == SaveFPRegsMode::kSave) {
806 bytes += kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
807 }
808
809 return bytes;
810 }
811
812 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
813 Register exclusion2, Register exclusion3) {
814 ASM_CODE_COMMENT(this);
815 // We don't allow a GC in a write barrier slow path so there is no need to
816 // store the registers in any particular way, but we do have to store and
817 // restore them.
818 int bytes = 0;
819 for (int i = 0; i < kNumberOfSavedRegs; i++) {
820 Register reg = saved_regs[i];
821 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
822 pushq(reg);
823 bytes += kSystemPointerSize;
824 }
825 }
826
827 // R12 to r15 are callee save on all platforms.
828 if (fp_mode == SaveFPRegsMode::kSave) {
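// With WebAssembly enabled the full 128-bit XMM registers may hold SIMD
// values, so kStackSavedSavedFPSize covers the whole register and Movdqu is
// used below; otherwise only the low 64 bits (a double) are saved with Movsd.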
829 const int delta = kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
830 AllocateStackSpace(delta);
831 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
832 XMMRegister reg = XMMRegister::from_code(i);
833 #if V8_ENABLE_WEBASSEMBLY
834 Movdqu(Operand(rsp, i * kStackSavedSavedFPSize), reg);
835 #else
836 Movsd(Operand(rsp, i * kStackSavedSavedFPSize), reg);
837 #endif // V8_ENABLE_WEBASSEMBLY
838 }
839 bytes += delta;
840 }
841
842 return bytes;
843 }
844
845 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
846 Register exclusion2, Register exclusion3) {
847 ASM_CODE_COMMENT(this);
848 int bytes = 0;
849 if (fp_mode == SaveFPRegsMode::kSave) {
850 for (int i = 0; i < XMMRegister::kNumRegisters; i++) {
851 XMMRegister reg = XMMRegister::from_code(i);
852 #if V8_ENABLE_WEBASSEMBLY
853 Movdqu(reg, Operand(rsp, i * kStackSavedSavedFPSize));
854 #else
855 Movsd(reg, Operand(rsp, i * kStackSavedSavedFPSize));
856 #endif // V8_ENABLE_WEBASSEMBLY
857 }
858 const int delta = kStackSavedSavedFPSize * XMMRegister::kNumRegisters;
859 addq(rsp, Immediate(delta));
860 bytes += delta;
861 }
862
863 for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
864 Register reg = saved_regs[i];
865 if (reg != exclusion1 && reg != exclusion2 && reg != exclusion3) {
866 popq(reg);
867 bytes += kSystemPointerSize;
868 }
869 }
870
871 return bytes;
872 }
873
874 void TurboAssembler::Movq(XMMRegister dst, Register src) {
875 if (CpuFeatures::IsSupported(AVX)) {
876 CpuFeatureScope avx_scope(this, AVX);
877 vmovq(dst, src);
878 } else {
879 movq(dst, src);
880 }
881 }
882
883 void TurboAssembler::Movq(Register dst, XMMRegister src) {
884 if (CpuFeatures::IsSupported(AVX)) {
885 CpuFeatureScope avx_scope(this, AVX);
886 vmovq(dst, src);
887 } else {
888 movq(dst, src);
889 }
890 }
891
892 void TurboAssembler::Pextrq(Register dst, XMMRegister src, int8_t imm8) {
893 if (CpuFeatures::IsSupported(AVX)) {
894 CpuFeatureScope avx_scope(this, AVX);
895 vpextrq(dst, src, imm8);
896 } else {
897 CpuFeatureScope sse_scope(this, SSE4_1);
898 pextrq(dst, src, imm8);
899 }
900 }
901
902 void TurboAssembler::Cvtss2sd(XMMRegister dst, XMMRegister src) {
903 if (CpuFeatures::IsSupported(AVX)) {
904 CpuFeatureScope scope(this, AVX);
905 vcvtss2sd(dst, src, src);
906 } else {
907 cvtss2sd(dst, src);
908 }
909 }
910
911 void TurboAssembler::Cvtss2sd(XMMRegister dst, Operand src) {
912 if (CpuFeatures::IsSupported(AVX)) {
913 CpuFeatureScope scope(this, AVX);
914 vcvtss2sd(dst, dst, src);
915 } else {
916 cvtss2sd(dst, src);
917 }
918 }
919
920 void TurboAssembler::Cvtsd2ss(XMMRegister dst, XMMRegister src) {
921 if (CpuFeatures::IsSupported(AVX)) {
922 CpuFeatureScope scope(this, AVX);
923 vcvtsd2ss(dst, src, src);
924 } else {
925 cvtsd2ss(dst, src);
926 }
927 }
928
929 void TurboAssembler::Cvtsd2ss(XMMRegister dst, Operand src) {
930 if (CpuFeatures::IsSupported(AVX)) {
931 CpuFeatureScope scope(this, AVX);
932 vcvtsd2ss(dst, dst, src);
933 } else {
934 cvtsd2ss(dst, src);
935 }
936 }
937
938 void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Register src) {
939 if (CpuFeatures::IsSupported(AVX)) {
940 CpuFeatureScope scope(this, AVX);
941 vcvtlsi2sd(dst, kScratchDoubleReg, src);
942 } else {
943 xorpd(dst, dst);
944 cvtlsi2sd(dst, src);
945 }
946 }
947
948 void TurboAssembler::Cvtlsi2sd(XMMRegister dst, Operand src) {
949 if (CpuFeatures::IsSupported(AVX)) {
950 CpuFeatureScope scope(this, AVX);
951 vcvtlsi2sd(dst, kScratchDoubleReg, src);
952 } else {
953 xorpd(dst, dst);
954 cvtlsi2sd(dst, src);
955 }
956 }
957
958 void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
959 if (CpuFeatures::IsSupported(AVX)) {
960 CpuFeatureScope scope(this, AVX);
961 vcvtlsi2ss(dst, kScratchDoubleReg, src);
962 } else {
963 xorps(dst, dst);
964 cvtlsi2ss(dst, src);
965 }
966 }
967
968 void TurboAssembler::Cvtlsi2ss(XMMRegister dst, Operand src) {
969 if (CpuFeatures::IsSupported(AVX)) {
970 CpuFeatureScope scope(this, AVX);
971 vcvtlsi2ss(dst, kScratchDoubleReg, src);
972 } else {
973 xorps(dst, dst);
974 cvtlsi2ss(dst, src);
975 }
976 }
977
978 void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
979 if (CpuFeatures::IsSupported(AVX)) {
980 CpuFeatureScope scope(this, AVX);
981 vcvtqsi2ss(dst, kScratchDoubleReg, src);
982 } else {
983 xorps(dst, dst);
984 cvtqsi2ss(dst, src);
985 }
986 }
987
988 void TurboAssembler::Cvtqsi2ss(XMMRegister dst, Operand src) {
989 if (CpuFeatures::IsSupported(AVX)) {
990 CpuFeatureScope scope(this, AVX);
991 vcvtqsi2ss(dst, kScratchDoubleReg, src);
992 } else {
993 xorps(dst, dst);
994 cvtqsi2ss(dst, src);
995 }
996 }
997
998 void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Register src) {
999 if (CpuFeatures::IsSupported(AVX)) {
1000 CpuFeatureScope scope(this, AVX);
1001 vcvtqsi2sd(dst, kScratchDoubleReg, src);
1002 } else {
1003 xorpd(dst, dst);
1004 cvtqsi2sd(dst, src);
1005 }
1006 }
1007
1008 void TurboAssembler::Cvtqsi2sd(XMMRegister dst, Operand src) {
1009 if (CpuFeatures::IsSupported(AVX)) {
1010 CpuFeatureScope scope(this, AVX);
1011 vcvtqsi2sd(dst, kScratchDoubleReg, src);
1012 } else {
1013 xorpd(dst, dst);
1014 cvtqsi2sd(dst, src);
1015 }
1016 }
1017
1018 void TurboAssembler::Cvtlui2ss(XMMRegister dst, Register src) {
1019 // Zero-extend the 32 bit value to 64 bit.
1020 movl(kScratchRegister, src);
1021 Cvtqsi2ss(dst, kScratchRegister);
1022 }
1023
1024 void TurboAssembler::Cvtlui2ss(XMMRegister dst, Operand src) {
1025 // Zero-extend the 32 bit value to 64 bit.
1026 movl(kScratchRegister, src);
1027 Cvtqsi2ss(dst, kScratchRegister);
1028 }
1029
1030 void TurboAssembler::Cvtlui2sd(XMMRegister dst, Register src) {
1031 // Zero-extend the 32 bit value to 64 bit.
1032 movl(kScratchRegister, src);
1033 Cvtqsi2sd(dst, kScratchRegister);
1034 }
1035
1036 void TurboAssembler::Cvtlui2sd(XMMRegister dst, Operand src) {
1037 // Zero-extend the 32 bit value to 64 bit.
1038 movl(kScratchRegister, src);
1039 Cvtqsi2sd(dst, kScratchRegister);
1040 }
1041
1042 void TurboAssembler::Cvtqui2ss(XMMRegister dst, Register src) {
1043 Label done;
1044 Cvtqsi2ss(dst, src);
1045 testq(src, src);
1046 j(positive, &done, Label::kNear);
1047
1048 // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
1049 if (src != kScratchRegister) movq(kScratchRegister, src);
1050 shrq(kScratchRegister, Immediate(1));
1051 // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
1052 Label msb_not_set;
1053 j(not_carry, &msb_not_set, Label::kNear);
1054 orq(kScratchRegister, Immediate(1));
1055 bind(&msb_not_set);
1056 Cvtqsi2ss(dst, kScratchRegister);
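// The conversion above used the halved value; adding the result to itself
// doubles it back to the original magnitude.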
1057 Addss(dst, dst);
1058 bind(&done);
1059 }
1060
1061 void TurboAssembler::Cvtqui2ss(XMMRegister dst, Operand src) {
1062 movq(kScratchRegister, src);
1063 Cvtqui2ss(dst, kScratchRegister);
1064 }
1065
1066 void TurboAssembler::Cvtqui2sd(XMMRegister dst, Register src) {
1067 Label done;
1068 Cvtqsi2sd(dst, src);
1069 testq(src, src);
1070 j(positive, &done, Label::kNear);
1071
1072 // Compute {src/2 | (src&1)} (retain the LSB to avoid rounding errors).
1073 if (src != kScratchRegister) movq(kScratchRegister, src);
1074 shrq(kScratchRegister, Immediate(1));
1075 // The LSB is shifted into CF. If it is set, set the LSB in {tmp}.
1076 Label msb_not_set;
1077 j(not_carry, &msb_not_set, Label::kNear);
1078 orq(kScratchRegister, Immediate(1));
1079 bind(&msb_not_set);
1080 Cvtqsi2sd(dst, kScratchRegister);
1081 Addsd(dst, dst);
1082 bind(&done);
1083 }
1084
1085 void TurboAssembler::Cvtqui2sd(XMMRegister dst, Operand src) {
1086 movq(kScratchRegister, src);
1087 Cvtqui2sd(dst, kScratchRegister);
1088 }
1089
1090 void TurboAssembler::Cvttss2si(Register dst, XMMRegister src) {
1091 if (CpuFeatures::IsSupported(AVX)) {
1092 CpuFeatureScope scope(this, AVX);
1093 vcvttss2si(dst, src);
1094 } else {
1095 cvttss2si(dst, src);
1096 }
1097 }
1098
1099 void TurboAssembler::Cvttss2si(Register dst, Operand src) {
1100 if (CpuFeatures::IsSupported(AVX)) {
1101 CpuFeatureScope scope(this, AVX);
1102 vcvttss2si(dst, src);
1103 } else {
1104 cvttss2si(dst, src);
1105 }
1106 }
1107
1108 void TurboAssembler::Cvttsd2si(Register dst, XMMRegister src) {
1109 if (CpuFeatures::IsSupported(AVX)) {
1110 CpuFeatureScope scope(this, AVX);
1111 vcvttsd2si(dst, src);
1112 } else {
1113 cvttsd2si(dst, src);
1114 }
1115 }
1116
1117 void TurboAssembler::Cvttsd2si(Register dst, Operand src) {
1118 if (CpuFeatures::IsSupported(AVX)) {
1119 CpuFeatureScope scope(this, AVX);
1120 vcvttsd2si(dst, src);
1121 } else {
1122 cvttsd2si(dst, src);
1123 }
1124 }
1125
1126 void TurboAssembler::Cvttss2siq(Register dst, XMMRegister src) {
1127 if (CpuFeatures::IsSupported(AVX)) {
1128 CpuFeatureScope scope(this, AVX);
1129 vcvttss2siq(dst, src);
1130 } else {
1131 cvttss2siq(dst, src);
1132 }
1133 }
1134
1135 void TurboAssembler::Cvttss2siq(Register dst, Operand src) {
1136 if (CpuFeatures::IsSupported(AVX)) {
1137 CpuFeatureScope scope(this, AVX);
1138 vcvttss2siq(dst, src);
1139 } else {
1140 cvttss2siq(dst, src);
1141 }
1142 }
1143
1144 void TurboAssembler::Cvttsd2siq(Register dst, XMMRegister src) {
1145 if (CpuFeatures::IsSupported(AVX)) {
1146 CpuFeatureScope scope(this, AVX);
1147 vcvttsd2siq(dst, src);
1148 } else {
1149 cvttsd2siq(dst, src);
1150 }
1151 }
1152
1153 void TurboAssembler::Cvttsd2siq(Register dst, Operand src) {
1154 if (CpuFeatures::IsSupported(AVX)) {
1155 CpuFeatureScope scope(this, AVX);
1156 vcvttsd2siq(dst, src);
1157 } else {
1158 cvttsd2siq(dst, src);
1159 }
1160 }
1161
1162 namespace {
1163 template <typename OperandOrXMMRegister, bool is_double>
1164 void ConvertFloatToUint64(TurboAssembler* tasm, Register dst,
1165 OperandOrXMMRegister src, Label* fail) {
1166 Label success;
1167 // There does not exist a native float-to-uint instruction, so we have to use
1168 // a float-to-int, and postprocess the result.
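// Example: for an input of exactly 2^63 the first conversion overflows to the
// sentinel 0x8000000000000000 (negative). Subtracting 2^63 yields 0.0, which
// converts to 0, and or-ing the top bit back in produces the expected 2^63.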
1169 if (is_double) {
1170 tasm->Cvttsd2siq(dst, src);
1171 } else {
1172 tasm->Cvttss2siq(dst, src);
1173 }
1174 // If the result of the conversion is positive, we are already done.
1175 tasm->testq(dst, dst);
1176 tasm->j(positive, &success);
1177 // The result of the first conversion was negative, which means that the
1178 // input value was not within the positive int64 range. We subtract 2^63
1179 // and convert it again to see if it is within the uint64 range.
1180 if (is_double) {
1181 tasm->Move(kScratchDoubleReg, -9223372036854775808.0);
1182 tasm->Addsd(kScratchDoubleReg, src);
1183 tasm->Cvttsd2siq(dst, kScratchDoubleReg);
1184 } else {
1185 tasm->Move(kScratchDoubleReg, -9223372036854775808.0f);
1186 tasm->Addss(kScratchDoubleReg, src);
1187 tasm->Cvttss2siq(dst, kScratchDoubleReg);
1188 }
1189 tasm->testq(dst, dst);
1190 // The only possible negative value here is 0x8000000000000000, which is
1191 // used on x64 to indicate an integer overflow.
1192 tasm->j(negative, fail ? fail : &success);
1193 // The input value is within uint64 range and the second conversion worked
1194 // successfully, but we still have to undo the subtraction we did
1195 // earlier.
1196 tasm->Move(kScratchRegister, 0x8000000000000000);
1197 tasm->orq(dst, kScratchRegister);
1198 tasm->bind(&success);
1199 }
1200 } // namespace
1201
1202 void TurboAssembler::Cvttsd2uiq(Register dst, Operand src, Label* fail) {
1203 ConvertFloatToUint64<Operand, true>(this, dst, src, fail);
1204 }
1205
1206 void TurboAssembler::Cvttsd2uiq(Register dst, XMMRegister src, Label* fail) {
1207 ConvertFloatToUint64<XMMRegister, true>(this, dst, src, fail);
1208 }
1209
1210 void TurboAssembler::Cvttss2uiq(Register dst, Operand src, Label* fail) {
1211 ConvertFloatToUint64<Operand, false>(this, dst, src, fail);
1212 }
1213
1214 void TurboAssembler::Cvttss2uiq(Register dst, XMMRegister src, Label* fail) {
1215 ConvertFloatToUint64<XMMRegister, false>(this, dst, src, fail);
1216 }
1217
1218 void TurboAssembler::Cmpeqss(XMMRegister dst, XMMRegister src) {
1219 if (CpuFeatures::IsSupported(AVX)) {
1220 CpuFeatureScope avx_scope(this, AVX);
1221 vcmpeqss(dst, src);
1222 } else {
1223 cmpeqss(dst, src);
1224 }
1225 }
1226
1227 void TurboAssembler::Cmpeqsd(XMMRegister dst, XMMRegister src) {
1228 if (CpuFeatures::IsSupported(AVX)) {
1229 CpuFeatureScope avx_scope(this, AVX);
1230 vcmpeqsd(dst, src);
1231 } else {
1232 cmpeqsd(dst, src);
1233 }
1234 }
1235
1236 // ----------------------------------------------------------------------------
1237 // Smi tagging, untagging and tag detection.
1238
1239 Register TurboAssembler::GetSmiConstant(Smi source) {
1240 Move(kScratchRegister, source);
1241 return kScratchRegister;
1242 }
1243
1244 void TurboAssembler::Cmp(Register dst, int32_t src) {
1245 if (src == 0) {
1246 testl(dst, dst);
1247 } else {
1248 cmpl(dst, Immediate(src));
1249 }
1250 }
1251
1252 void TurboAssembler::SmiTag(Register reg) {
1253 STATIC_ASSERT(kSmiTag == 0);
1254 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
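// Since kSmiTag == 0, tagging is a single shift: kSmiShift is 1 for 31-bit
// Smis (pointer compression) and 32 for 32-bit Smis.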
1255 if (COMPRESS_POINTERS_BOOL) {
1256 shll(reg, Immediate(kSmiShift));
1257 } else {
1258 shlq(reg, Immediate(kSmiShift));
1259 }
1260 }
1261
1262 void TurboAssembler::SmiTag(Register dst, Register src) {
1263 DCHECK(dst != src);
1264 if (COMPRESS_POINTERS_BOOL) {
1265 movl(dst, src);
1266 } else {
1267 movq(dst, src);
1268 }
1269 SmiTag(dst);
1270 }
1271
1272 void TurboAssembler::SmiUntag(Register reg) {
1273 STATIC_ASSERT(kSmiTag == 0);
1274 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1275 // TODO(v8:7703): Is there a way to avoid this sign extension when pointer
1276 // compression is enabled?
1277 if (COMPRESS_POINTERS_BOOL) {
1278 movsxlq(reg, reg);
1279 }
1280 sarq(reg, Immediate(kSmiShift));
1281 }
1282
1283 void TurboAssembler::SmiUntag(Register dst, Register src) {
1284 DCHECK(dst != src);
1285 if (COMPRESS_POINTERS_BOOL) {
1286 movsxlq(dst, src);
1287 } else {
1288 movq(dst, src);
1289 }
1290 // TODO(v8:7703): Call SmiUntag(reg) if we can find a way to avoid the extra
1291 // mov when pointer compression is enabled.
1292 STATIC_ASSERT(kSmiTag == 0);
1293 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1294 sarq(dst, Immediate(kSmiShift));
1295 }
1296
1297 void TurboAssembler::SmiUntag(Register dst, Operand src) {
1298 if (SmiValuesAre32Bits()) {
1299 movl(dst, Operand(src, kSmiShift / kBitsPerByte));
1300 // Sign extend to 64-bit.
1301 movsxlq(dst, dst);
1302 } else {
1303 DCHECK(SmiValuesAre31Bits());
1304 if (COMPRESS_POINTERS_BOOL) {
1305 movsxlq(dst, src);
1306 } else {
1307 movq(dst, src);
1308 }
1309 sarq(dst, Immediate(kSmiShift));
1310 }
1311 }
1312
1313 void TurboAssembler::SmiToInt32(Register reg) {
1314 STATIC_ASSERT(kSmiTag == 0);
1315 DCHECK(SmiValuesAre32Bits() || SmiValuesAre31Bits());
1316 if (COMPRESS_POINTERS_BOOL) {
1317 sarl(reg, Immediate(kSmiShift));
1318 } else {
1319 shrq(reg, Immediate(kSmiShift));
1320 }
1321 }
1322
1323 void TurboAssembler::SmiCompare(Register smi1, Register smi2) {
1324 AssertSmi(smi1);
1325 AssertSmi(smi2);
1326 cmp_tagged(smi1, smi2);
1327 }
1328
1329 void TurboAssembler::SmiCompare(Register dst, Smi src) {
1330 AssertSmi(dst);
1331 Cmp(dst, src);
1332 }
1333
1334 void TurboAssembler::Cmp(Register dst, Smi src) {
1335 if (src.value() == 0) {
1336 test_tagged(dst, dst);
1337 } else {
1338 DCHECK_NE(dst, kScratchRegister);
1339 Register constant_reg = GetSmiConstant(src);
1340 cmp_tagged(dst, constant_reg);
1341 }
1342 }
1343
1344 void TurboAssembler::SmiCompare(Register dst, Operand src) {
1345 AssertSmi(dst);
1346 AssertSmi(src);
1347 cmp_tagged(dst, src);
1348 }
1349
1350 void TurboAssembler::SmiCompare(Operand dst, Register src) {
1351 AssertSmi(dst);
1352 AssertSmi(src);
1353 cmp_tagged(dst, src);
1354 }
1355
1356 void TurboAssembler::SmiCompare(Operand dst, Smi src) {
1357 AssertSmi(dst);
1358 if (SmiValuesAre32Bits()) {
1359 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src.value()));
1360 } else {
1361 DCHECK(SmiValuesAre31Bits());
1362 cmpl(dst, Immediate(src));
1363 }
1364 }
1365
1366 void TurboAssembler::Cmp(Operand dst, Smi src) {
1367 // The Operand cannot use the smi register.
1368 Register smi_reg = GetSmiConstant(src);
1369 DCHECK(!dst.AddressUsesRegister(smi_reg));
1370 cmp_tagged(dst, smi_reg);
1371 }
1372
1373 Condition TurboAssembler::CheckSmi(Register src) {
1374 STATIC_ASSERT(kSmiTag == 0);
1375 testb(src, Immediate(kSmiTagMask));
1376 return zero;
1377 }
1378
1379 Condition TurboAssembler::CheckSmi(Operand src) {
1380 STATIC_ASSERT(kSmiTag == 0);
1381 testb(src, Immediate(kSmiTagMask));
1382 return zero;
1383 }
1384
1385 void TurboAssembler::JumpIfSmi(Register src, Label* on_smi,
1386 Label::Distance near_jump) {
1387 Condition smi = CheckSmi(src);
1388 j(smi, on_smi, near_jump);
1389 }
1390
1391 void TurboAssembler::JumpIfNotSmi(Register src, Label* on_not_smi,
1392 Label::Distance near_jump) {
1393 Condition smi = CheckSmi(src);
1394 j(NegateCondition(smi), on_not_smi, near_jump);
1395 }
1396
1397 void TurboAssembler::JumpIfNotSmi(Operand src, Label* on_not_smi,
1398 Label::Distance near_jump) {
1399 Condition smi = CheckSmi(src);
1400 j(NegateCondition(smi), on_not_smi, near_jump);
1401 }
1402
1403 void TurboAssembler::SmiAddConstant(Operand dst, Smi constant) {
1404 if (constant.value() != 0) {
1405 if (SmiValuesAre32Bits()) {
1406 addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant.value()));
1407 } else {
1408 DCHECK(SmiValuesAre31Bits());
1409 if (kTaggedSize == kInt64Size) {
1410 // Sign-extend value after addition
1411 movl(kScratchRegister, dst);
1412 addl(kScratchRegister, Immediate(constant));
1413 movsxlq(kScratchRegister, kScratchRegister);
1414 movq(dst, kScratchRegister);
1415 } else {
1416 DCHECK_EQ(kTaggedSize, kInt32Size);
1417 addl(dst, Immediate(constant));
1418 }
1419 }
1420 }
1421 }
1422
1423 SmiIndex TurboAssembler::SmiToIndex(Register dst, Register src, int shift) {
1424 if (SmiValuesAre32Bits()) {
1425 DCHECK(is_uint6(shift));
1426 // There is a possible optimization if shift is in the range 60-63, but that
1427 // will (and must) never happen.
1428 if (dst != src) {
1429 movq(dst, src);
1430 }
1431 if (shift < kSmiShift) {
1432 sarq(dst, Immediate(kSmiShift - shift));
1433 } else {
1434 shlq(dst, Immediate(shift - kSmiShift));
1435 }
1436 return SmiIndex(dst, times_1);
1437 } else {
1438 DCHECK(SmiValuesAre31Bits());
1439 // We have to sign extend the index register to 64-bit as the SMI might
1440 // be negative.
1441 movsxlq(dst, src);
1442 if (shift < kSmiShift) {
1443 sarq(dst, Immediate(kSmiShift - shift));
1444 } else if (shift != kSmiShift) {
1445 if (shift - kSmiShift <= static_cast<int>(times_8)) {
1446 return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiShift));
1447 }
1448 shlq(dst, Immediate(shift - kSmiShift));
1449 }
1450 return SmiIndex(dst, times_1);
1451 }
1452 }
1453
1454 void TurboAssembler::Push(Smi source) {
1455 intptr_t smi = static_cast<intptr_t>(source.ptr());
1456 if (is_int32(smi)) {
1457 Push(Immediate(static_cast<int32_t>(smi)));
1458 return;
1459 }
1460 int first_byte_set = base::bits::CountTrailingZeros64(smi) / 8;
1461 int last_byte_set = (63 - base::bits::CountLeadingZeros64(smi)) / 8;
1462 if (first_byte_set == last_byte_set) {
1463 // This sequence has only 7 bytes, compared to the 12 bytes below.
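// All set bits fall within a single byte, e.g. byte 5: push zero (a full
// 64-bit slot) and overwrite just that one byte at rsp + 5.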
1464 Push(Immediate(0));
1465 movb(Operand(rsp, first_byte_set),
1466 Immediate(static_cast<int8_t>(smi >> (8 * first_byte_set))));
1467 return;
1468 }
1469 Register constant = GetSmiConstant(source);
1470 Push(constant);
1471 }
1472
1473 // ----------------------------------------------------------------------------
1474
1475 void TurboAssembler::Move(Register dst, Smi source) {
1476 STATIC_ASSERT(kSmiTag == 0);
1477 int value = source.value();
1478 if (value == 0) {
1479 xorl(dst, dst);
1480 } else if (SmiValuesAre32Bits()) {
1481 Move(dst, source.ptr(), RelocInfo::NO_INFO);
1482 } else {
1483 uint32_t uvalue = static_cast<uint32_t>(source.ptr());
1484 Move(dst, uvalue);
1485 }
1486 }
1487
1488 void TurboAssembler::Move(Operand dst, intptr_t x) {
1489 if (is_int32(x)) {
1490 movq(dst, Immediate(static_cast<int32_t>(x)));
1491 } else {
1492 Move(kScratchRegister, x);
1493 movq(dst, kScratchRegister);
1494 }
1495 }
1496
1497 void TurboAssembler::Move(Register dst, ExternalReference ext) {
1498 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1499 // non-isolate-independent code. In many cases it might be cheaper than
1500 // embedding the relocatable value.
1501 if (root_array_available_ && options().isolate_independent_code) {
1502 IndirectLoadExternalReference(dst, ext);
1503 return;
1504 }
1505 movq(dst, Immediate64(ext.address(), RelocInfo::EXTERNAL_REFERENCE));
1506 }
1507
1508 void TurboAssembler::Move(Register dst, Register src) {
1509 if (dst != src) {
1510 movq(dst, src);
1511 }
1512 }
1513
1514 void TurboAssembler::Move(Register dst, Operand src) { movq(dst, src); }
1515 void TurboAssembler::Move(Register dst, Immediate src) {
1516 if (src.rmode() == RelocInfo::Mode::NO_INFO) {
1517 Move(dst, src.value());
1518 } else {
1519 movl(dst, src);
1520 }
1521 }
1522
1523 void TurboAssembler::Move(XMMRegister dst, XMMRegister src) {
1524 if (dst != src) {
1525 Movaps(dst, src);
1526 }
1527 }
1528
1529 void TurboAssembler::MovePair(Register dst0, Register src0, Register dst1,
1530 Register src1) {
1531 if (dst0 != src1) {
1532 // Normal case: Writing to dst0 does not destroy src1.
1533 Move(dst0, src0);
1534 Move(dst1, src1);
1535 } else if (dst1 != src0) {
1536 // Only dst0 and src1 are the same register,
1537 // but writing to dst1 does not destroy src0.
1538 Move(dst1, src1);
1539 Move(dst0, src0);
1540 } else {
1541 // dst0 == src1, and dst1 == src0, a swap is required:
1542 // dst0 \/ src0
1543 // dst1 /\ src1
1544 xchgq(dst0, dst1);
1545 }
1546 }
1547
1548 void TurboAssembler::MoveNumber(Register dst, double value) {
1549 int32_t smi;
1550 if (DoubleToSmiInteger(value, &smi)) {
1551 Move(dst, Smi::FromInt(smi));
1552 } else {
1553 movq_heap_number(dst, value);
1554 }
1555 }
1556
1557 void TurboAssembler::Move(XMMRegister dst, uint32_t src) {
1558 if (src == 0) {
1559 Xorps(dst, dst);
1560 } else {
1561 unsigned nlz = base::bits::CountLeadingZeros(src);
1562 unsigned ntz = base::bits::CountTrailingZeros(src);
1563 unsigned pop = base::bits::CountPopulation(src);
1564 DCHECK_NE(0u, pop);
1565 if (pop + ntz + nlz == 32) {
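// The set bits form one contiguous run, so start from all-ones (pcmpeqd) and
// shift the zeros back in from both ends; this avoids going through a
// general-purpose register.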
1566 Pcmpeqd(dst, dst);
1567 if (ntz) Pslld(dst, static_cast<byte>(ntz + nlz));
1568 if (nlz) Psrld(dst, static_cast<byte>(nlz));
1569 } else {
1570 movl(kScratchRegister, Immediate(src));
1571 Movd(dst, kScratchRegister);
1572 }
1573 }
1574 }
1575
1576 void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
1577 if (src == 0) {
1578 Xorpd(dst, dst);
1579 } else {
1580 unsigned nlz = base::bits::CountLeadingZeros(src);
1581 unsigned ntz = base::bits::CountTrailingZeros(src);
1582 unsigned pop = base::bits::CountPopulation(src);
1583 DCHECK_NE(0u, pop);
1584 if (pop + ntz + nlz == 64) {
1585 Pcmpeqd(dst, dst);
1586 if (ntz) Psllq(dst, static_cast<byte>(ntz + nlz));
1587 if (nlz) Psrlq(dst, static_cast<byte>(nlz));
1588 } else {
1589 uint32_t lower = static_cast<uint32_t>(src);
1590 uint32_t upper = static_cast<uint32_t>(src >> 32);
1591 if (upper == 0) {
1592 Move(dst, lower);
1593 } else {
1594 movq(kScratchRegister, src);
1595 Movq(dst, kScratchRegister);
1596 }
1597 }
1598 }
1599 }
1600
1601 void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
1602 if (high == low) {
1603 Move(dst, low);
1604 Punpcklqdq(dst, dst);
1605 return;
1606 }
1607
1608 Move(dst, low);
1609 movq(kScratchRegister, high);
1610 Pinsrq(dst, dst, kScratchRegister, uint8_t{1});
1611 }
1612
1613 // ----------------------------------------------------------------------------
1614
1615 void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
1616 if (source->IsSmi()) {
1617 Cmp(dst, Smi::cast(*source));
1618 } else {
1619 Move(kScratchRegister, Handle<HeapObject>::cast(source));
1620 cmp_tagged(dst, kScratchRegister);
1621 }
1622 }
1623
1624 void MacroAssembler::Cmp(Operand dst, Handle<Object> source) {
1625 if (source->IsSmi()) {
1626 Cmp(dst, Smi::cast(*source));
1627 } else {
1628 Move(kScratchRegister, Handle<HeapObject>::cast(source));
1629 cmp_tagged(dst, kScratchRegister);
1630 }
1631 }
1632
1633 void MacroAssembler::CompareRange(Register value, unsigned lower_limit,
1634 unsigned higher_limit) {
1635 ASM_CODE_COMMENT(this);
1636 DCHECK_LT(lower_limit, higher_limit);
1637 if (lower_limit != 0) {
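// Compute value - lower_limit in the scratch register and compare it
// (unsigned) against higher_limit - lower_limit; a below_equal result then
// signals lower_limit <= value <= higher_limit with a single cmp.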
1638 leal(kScratchRegister, Operand(value, 0u - lower_limit));
1639 cmpl(kScratchRegister, Immediate(higher_limit - lower_limit));
1640 } else {
1641 cmpl(value, Immediate(higher_limit));
1642 }
1643 }
1644
1645 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1646 unsigned higher_limit, Label* on_in_range,
1647 Label::Distance near_jump) {
1648 CompareRange(value, lower_limit, higher_limit);
1649 j(below_equal, on_in_range, near_jump);
1650 }
1651
1652 void TurboAssembler::Push(Handle<HeapObject> source) {
1653 Move(kScratchRegister, source);
1654 Push(kScratchRegister);
1655 }
1656
1657 void TurboAssembler::PushArray(Register array, Register size, Register scratch,
1658 PushArrayOrder order) {
1659 DCHECK(!AreAliased(array, size, scratch));
1660 Register counter = scratch;
1661 Label loop, entry;
1662 if (order == PushArrayOrder::kReverse) {
1663 Move(counter, 0);
1664 jmp(&entry);
1665 bind(&loop);
1666 Push(Operand(array, counter, times_system_pointer_size, 0));
1667 incq(counter);
1668 bind(&entry);
1669 cmpq(counter, size);
1670 j(less, &loop, Label::kNear);
1671 } else {
1672 movq(counter, size);
1673 jmp(&entry);
1674 bind(&loop);
1675 Push(Operand(array, counter, times_system_pointer_size, 0));
1676 bind(&entry);
1677 decq(counter);
1678 j(greater_equal, &loop, Label::kNear);
1679 }
1680 }
1681
1682 void TurboAssembler::Move(Register result, Handle<HeapObject> object,
1683 RelocInfo::Mode rmode) {
1684 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
1685 // non-isolate-independent code. In many cases it might be cheaper than
1686 // embedding the relocatable value.
1687 if (root_array_available_ && options().isolate_independent_code) {
1688 // TODO(v8:9706): Fix-it! This load will always uncompress the value
1689 // even when we are loading a compressed embedded object.
1690 IndirectLoadConstant(result, object);
1691 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
1692 EmbeddedObjectIndex index = AddEmbeddedObject(object);
1693 DCHECK(is_uint32(index));
1694 movl(result, Immediate(static_cast<int>(index), rmode));
1695 } else {
1696 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
1697 movq(result, Immediate64(object.address(), rmode));
1698 }
1699 }
1700
1701 void TurboAssembler::Move(Operand dst, Handle<HeapObject> object,
1702 RelocInfo::Mode rmode) {
1703 Move(kScratchRegister, object, rmode);
1704 movq(dst, kScratchRegister);
1705 }
1706
1707 void TurboAssembler::MoveStringConstant(Register result,
1708 const StringConstantBase* string,
1709 RelocInfo::Mode rmode) {
1710 movq_string(result, string);
1711 }
1712
1713 void MacroAssembler::Drop(int stack_elements) {
1714 if (stack_elements > 0) {
1715 addq(rsp, Immediate(stack_elements * kSystemPointerSize));
1716 }
1717 }
1718
1719 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
1720 Register scratch) {
1721 DCHECK_GT(stack_elements, 0);
1722 if (stack_elements == 1) {
1723 popq(MemOperand(rsp, 0));
1724 return;
1725 }
1726
1727 PopReturnAddressTo(scratch);
1728 Drop(stack_elements);
1729 PushReturnAddressFrom(scratch);
1730 }
1731
1732 void TurboAssembler::DropArguments(Register count, ArgumentsCountType type,
1733 ArgumentsCountMode mode) {
1734 int receiver_bytes =
1735 (mode == kCountExcludesReceiver) ? kSystemPointerSize : 0;
1736 switch (type) {
1737 case kCountIsInteger: {
1738 leaq(rsp, Operand(rsp, count, times_system_pointer_size, receiver_bytes));
1739 break;
1740 }
1741 case kCountIsSmi: {
1742 SmiIndex index = SmiToIndex(count, count, kSystemPointerSizeLog2);
1743 leaq(rsp, Operand(rsp, index.reg, index.scale, receiver_bytes));
1744 break;
1745 }
1746 case kCountIsBytes: {
1747 if (receiver_bytes == 0) {
1748 addq(rsp, count);
1749 } else {
1750 leaq(rsp, Operand(rsp, count, times_1, receiver_bytes));
1751 }
1752 break;
1753 }
1754 }
1755 }
1756
1757 void TurboAssembler::DropArguments(Register count, Register scratch,
1758 ArgumentsCountType type,
1759 ArgumentsCountMode mode) {
1760 DCHECK(!AreAliased(count, scratch));
1761 PopReturnAddressTo(scratch);
1762 DropArguments(count, type, mode);
1763 PushReturnAddressFrom(scratch);
1764 }
1765
1766 void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
1767 Register receiver,
1768 Register scratch,
1769 ArgumentsCountType type,
1770 ArgumentsCountMode mode) {
1771 DCHECK(!AreAliased(argc, receiver, scratch));
1772 PopReturnAddressTo(scratch);
1773 DropArguments(argc, type, mode);
1774 Push(receiver);
1775 PushReturnAddressFrom(scratch);
1776 }
1777
1778 void TurboAssembler::DropArgumentsAndPushNewReceiver(Register argc,
1779 Operand receiver,
1780 Register scratch,
1781 ArgumentsCountType type,
1782 ArgumentsCountMode mode) {
1783 DCHECK(!AreAliased(argc, scratch));
1784 DCHECK(!receiver.AddressUsesRegister(scratch));
1785 PopReturnAddressTo(scratch);
1786 DropArguments(argc, type, mode);
1787 Push(receiver);
1788 PushReturnAddressFrom(scratch);
1789 }
1790
1791 void TurboAssembler::Push(Register src) { pushq(src); }
1792
1793 void TurboAssembler::Push(Operand src) { pushq(src); }
1794
1795 void MacroAssembler::PushQuad(Operand src) { pushq(src); }
1796
1797 void TurboAssembler::Push(Immediate value) { pushq(value); }
1798
1799 void MacroAssembler::PushImm32(int32_t imm32) { pushq_imm32(imm32); }
1800
1801 void MacroAssembler::Pop(Register dst) { popq(dst); }
1802
1803 void MacroAssembler::Pop(Operand dst) { popq(dst); }
1804
1805 void MacroAssembler::PopQuad(Operand dst) { popq(dst); }
1806
1807 void TurboAssembler::Jump(const ExternalReference& reference) {
1808 DCHECK(root_array_available());
1809 jmp(Operand(kRootRegister, RootRegisterOffsetForExternalReferenceTableEntry(
1810 isolate(), reference)));
1811 }
1812
1813 void TurboAssembler::Jump(Operand op) { jmp(op); }
1814
1815 void TurboAssembler::Jump(Address destination, RelocInfo::Mode rmode) {
1816 Move(kScratchRegister, destination, rmode);
1817 jmp(kScratchRegister);
1818 }
1819
1820 void TurboAssembler::Jump(Handle<CodeT> code_object, RelocInfo::Mode rmode,
1821 Condition cc) {
1822 DCHECK_IMPLIES(
1823 options().isolate_independent_code,
1824 Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code_object)));
1825 if (options().inline_offheap_trampolines) {
1826 Builtin builtin = Builtin::kNoBuiltinId;
1827 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
1828 Label skip;
1829 if (cc != always) {
1830 if (cc == never) return;
1831 j(NegateCondition(cc), &skip, Label::kNear);
1832 }
1833 TailCallBuiltin(builtin);
1834 bind(&skip);
1835 return;
1836 }
1837 }
1838 j(cc, code_object, rmode);
1839 }
1840
1841 void MacroAssembler::JumpToOffHeapInstructionStream(Address entry) {
1842 Move(kOffHeapTrampolineRegister, entry, RelocInfo::OFF_HEAP_TARGET);
1843 jmp(kOffHeapTrampolineRegister);
1844 }
1845
1846 void TurboAssembler::Call(ExternalReference ext) {
1847 LoadAddress(kScratchRegister, ext);
1848 call(kScratchRegister);
1849 }
1850
1851 void TurboAssembler::Call(Operand op) {
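// Performance tuning for Atom CPUs: load the target into a register and
// call it instead of calling through memory (see also the note in
// CallForDeoptimization).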
1852 if (!CpuFeatures::IsSupported(INTEL_ATOM)) {
1853 call(op);
1854 } else {
1855 movq(kScratchRegister, op);
1856 call(kScratchRegister);
1857 }
1858 }
1859
1860 void TurboAssembler::Call(Address destination, RelocInfo::Mode rmode) {
1861 Move(kScratchRegister, destination, rmode);
1862 call(kScratchRegister);
1863 }
1864
1865 void TurboAssembler::Call(Handle<CodeT> code_object, RelocInfo::Mode rmode) {
1866 // TODO(v8:11880): avoid roundtrips between cdc and code.
1867 DCHECK_IMPLIES(
1868 options().isolate_independent_code,
1869 Builtins::IsIsolateIndependentBuiltin(FromCodeT(*code_object)));
1870 if (options().inline_offheap_trampolines) {
1871 Builtin builtin = Builtin::kNoBuiltinId;
1872 if (isolate()->builtins()->IsBuiltinHandle(code_object, &builtin)) {
1873 // Inline the trampoline.
1874 CallBuiltin(builtin);
1875 return;
1876 }
1877 }
1878 DCHECK(RelocInfo::IsCodeTarget(rmode));
1879 call(code_object, rmode);
1880 }
1881
1882 Operand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
1883 DCHECK(root_array_available());
1884 return Operand(kRootRegister, IsolateData::BuiltinEntrySlotOffset(builtin));
1885 }
1886
1887 Operand TurboAssembler::EntryFromBuiltinIndexAsOperand(Register builtin_index) {
1888 if (SmiValuesAre32Bits()) {
1889 // The builtin_index register contains the builtin index as a Smi.
1890 SmiUntag(builtin_index);
1891 return Operand(kRootRegister, builtin_index, times_system_pointer_size,
1892 IsolateData::builtin_entry_table_offset());
1893 } else {
1894 DCHECK(SmiValuesAre31Bits());
1895
1896 // The builtin_index register contains the builtin index as a Smi.
1897 // Untagging is folded into the indexing operand below (we use
1898 // times_half_system_pointer_size since smis are already shifted by one).
1899 return Operand(kRootRegister, builtin_index, times_half_system_pointer_size,
1900 IsolateData::builtin_entry_table_offset());
1901 }
1902 }
1903
1904 void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
1905 Call(EntryFromBuiltinIndexAsOperand(builtin_index));
1906 }
1907
1908 void TurboAssembler::CallBuiltin(Builtin builtin) {
1909 ASM_CODE_COMMENT_STRING(this, CommentForOffHeapTrampoline("call", builtin));
1910 if (options().short_builtin_calls) {
1911 call(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
1912 } else {
1913 Move(kScratchRegister, BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
1914 call(kScratchRegister);
1915 }
1916 }
1917
1918 void TurboAssembler::TailCallBuiltin(Builtin builtin) {
1919 ASM_CODE_COMMENT_STRING(this,
1920 CommentForOffHeapTrampoline("tail call", builtin));
1921 if (options().short_builtin_calls) {
1922 jmp(BuiltinEntry(builtin), RelocInfo::RUNTIME_ENTRY);
1923 } else {
1924 Jump(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
1925 }
1926 }
1927
1928 void TurboAssembler::LoadCodeObjectEntry(Register destination,
1929 Register code_object) {
1930 ASM_CODE_COMMENT(this);
1931 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
1932 LoadExternalPointerField(
1933 destination,
1934 FieldOperand(code_object, CodeDataContainer::kCodeEntryPointOffset),
1935 kCodeEntryPointTag, kScratchRegister);
1936 return;
1937 }
1938
1939 // Code objects are called differently depending on whether we are generating
1940 // builtin code (which will later be embedded into the binary) or compiling
1941 // user JS code at runtime.
1942 // * Builtin code runs in --jitless mode and thus must not call into on-heap
1943 // Code targets. Instead, we dispatch through the builtins entry table.
1944 // * Codegen at runtime does not have this restriction and we can use the
1945 // shorter, branchless instruction sequence. The assumption here is that
1946 // targets are usually generated code and not builtin Code objects.
1947
1948 if (options().isolate_independent_code) {
1949 DCHECK(root_array_available());
1950 Label if_code_is_off_heap, out;
1951
1952 // Check whether the Code object is an off-heap trampoline. If so, call its
1953 // (off-heap) entry point directly without going through the (on-heap)
1954 // trampoline. Otherwise, just call the Code object as always.
1955 testl(FieldOperand(code_object, Code::kFlagsOffset),
1956 Immediate(Code::IsOffHeapTrampoline::kMask));
1957 j(not_equal, &if_code_is_off_heap);
1958
1959 // Not an off-heap trampoline, the entry point is at
1960 // Code::raw_instruction_start().
1961 Move(destination, code_object);
1962 addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
1963 jmp(&out);
1964
1965 // An off-heap trampoline, the entry point is loaded from the builtin entry
1966 // table.
1967 bind(&if_code_is_off_heap);
1968 movl(destination, FieldOperand(code_object, Code::kBuiltinIndexOffset));
1969 movq(destination,
1970 Operand(kRootRegister, destination, times_system_pointer_size,
1971 IsolateData::builtin_entry_table_offset()));
1972
1973 bind(&out);
1974 } else {
1975 Move(destination, code_object);
1976 addq(destination, Immediate(Code::kHeaderSize - kHeapObjectTag));
1977 }
1978 }
1979
1980 void TurboAssembler::CallCodeObject(Register code_object) {
1981 LoadCodeObjectEntry(code_object, code_object);
1982 call(code_object);
1983 }
1984
1985 void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
1986 LoadCodeObjectEntry(code_object, code_object);
1987 switch (jump_mode) {
1988 case JumpMode::kJump:
1989 jmp(code_object);
1990 return;
1991 case JumpMode::kPushAndReturn:
1992 pushq(code_object);
1993 Ret();
1994 return;
1995 }
1996 }
1997
1998 void TurboAssembler::LoadCodeDataContainerEntry(
1999 Register destination, Register code_data_container_object) {
2000 ASM_CODE_COMMENT(this);
2001 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
2002 LoadExternalPointerField(
2003 destination,
2004 FieldOperand(code_data_container_object,
2005 CodeDataContainer::kCodeEntryPointOffset),
2006 kCodeEntryPointTag, kScratchRegister);
2007 }
2008
2009 void TurboAssembler::LoadCodeDataContainerCodeNonBuiltin(
2010 Register destination, Register code_data_container_object) {
2011 ASM_CODE_COMMENT(this);
2012 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
2013 // Given the field layout, we can read the Code reference as a full word.
2014 STATIC_ASSERT(!V8_EXTERNAL_CODE_SPACE_BOOL ||
2015 (CodeDataContainer::kCodeCageBaseUpper32BitsOffset ==
2016 CodeDataContainer::kCodeOffset + kTaggedSize));
2017 movq(destination, FieldOperand(code_data_container_object,
2018 CodeDataContainer::kCodeOffset));
2019 }
2020
2021 void TurboAssembler::CallCodeDataContainerObject(
2022 Register code_data_container_object) {
2023 LoadCodeDataContainerEntry(code_data_container_object,
2024 code_data_container_object);
2025 call(code_data_container_object);
2026 }
2027
2028 void TurboAssembler::JumpCodeDataContainerObject(
2029 Register code_data_container_object, JumpMode jump_mode) {
2030 LoadCodeDataContainerEntry(code_data_container_object,
2031 code_data_container_object);
2032 switch (jump_mode) {
2033 case JumpMode::kJump:
2034 jmp(code_data_container_object);
2035 return;
2036 case JumpMode::kPushAndReturn:
2037 pushq(code_data_container_object);
2038 Ret();
2039 return;
2040 }
2041 }
2042
2043 void TurboAssembler::LoadCodeTEntry(Register destination, Register code) {
2044 ASM_CODE_COMMENT(this);
2045 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2046 LoadCodeDataContainerEntry(destination, code);
2047 } else {
2048 leaq(destination, Operand(code, Code::kHeaderSize - kHeapObjectTag));
2049 }
2050 }
2051
2052 void TurboAssembler::CallCodeTObject(Register code) {
2053 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2054 CallCodeDataContainerObject(code);
2055 } else {
2056 CallCodeObject(code);
2057 }
2058 }
2059
2060 void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
2061 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
2062 JumpCodeDataContainerObject(code, jump_mode);
2063 } else {
2064 JumpCodeObject(code, jump_mode);
2065 }
2066 }
2067
2068 void TurboAssembler::PextrdPreSse41(Register dst, XMMRegister src,
2069 uint8_t imm8) {
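// Pre-SSE4.1 fallback: lane 0 is extracted with a plain movd; lane 1 by
// moving the whole quadword and shifting the upper half down.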
2070 if (imm8 == 0) {
2071 Movd(dst, src);
2072 return;
2073 }
2074 DCHECK_EQ(1, imm8);
2075 movq(dst, src);
2076 shrq(dst, Immediate(32));
2077 }
2078
2079 namespace {
2080 template <typename Op>
2081 void PinsrdPreSse41Helper(TurboAssembler* tasm, XMMRegister dst, Op src,
2082 uint8_t imm8, uint32_t* load_pc_offset) {
2083 tasm->Movd(kScratchDoubleReg, src);
2084 if (load_pc_offset) *load_pc_offset = tasm->pc_offset();
2085 if (imm8 == 1) {
2086 tasm->punpckldq(dst, kScratchDoubleReg);
2087 } else {
2088 DCHECK_EQ(0, imm8);
2089 tasm->Movss(dst, kScratchDoubleReg);
2090 }
2091 }
2092 } // namespace
2093
2094 void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Register src, uint8_t imm8,
2095 uint32_t* load_pc_offset) {
2096 PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
2097 }
2098
2099 void TurboAssembler::PinsrdPreSse41(XMMRegister dst, Operand src, uint8_t imm8,
2100 uint32_t* load_pc_offset) {
2101 PinsrdPreSse41Helper(this, dst, src, imm8, load_pc_offset);
2102 }
2103
2104 void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
2105 uint8_t imm8, uint32_t* load_pc_offset) {
2106 PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
2107 imm8, load_pc_offset, {SSE4_1});
2108 }
2109
2110 void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
2111 uint8_t imm8, uint32_t* load_pc_offset) {
2112 PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
2113 imm8, load_pc_offset, {SSE4_1});
2114 }
2115
2116 void TurboAssembler::Lzcntl(Register dst, Register src) {
2117 if (CpuFeatures::IsSupported(LZCNT)) {
2118 CpuFeatureScope scope(this, LZCNT);
2119 lzcntl(dst, src);
2120 return;
2121 }
2122 Label not_zero_src;
2123 bsrl(dst, src);
2124 j(not_zero, &not_zero_src, Label::kNear);
2125 Move(dst, 63); // 63^31 == 32
2126 bind(&not_zero_src);
2127 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2128 }
2129
2130 void TurboAssembler::Lzcntl(Register dst, Operand src) {
2131 if (CpuFeatures::IsSupported(LZCNT)) {
2132 CpuFeatureScope scope(this, LZCNT);
2133 lzcntl(dst, src);
2134 return;
2135 }
2136 Label not_zero_src;
2137 bsrl(dst, src);
2138 j(not_zero, &not_zero_src, Label::kNear);
2139 Move(dst, 63); // 63^31 == 32
2140 bind(&not_zero_src);
2141 xorl(dst, Immediate(31)); // for x in [0..31], 31^x == 31 - x
2142 }
2143
2144 void TurboAssembler::Lzcntq(Register dst, Register src) {
2145 if (CpuFeatures::IsSupported(LZCNT)) {
2146 CpuFeatureScope scope(this, LZCNT);
2147 lzcntq(dst, src);
2148 return;
2149 }
2150 Label not_zero_src;
2151 bsrq(dst, src);
2152 j(not_zero, &not_zero_src, Label::kNear);
2153 Move(dst, 127); // 127^63 == 64
2154 bind(&not_zero_src);
2155 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
2156 }
2157
2158 void TurboAssembler::Lzcntq(Register dst, Operand src) {
2159 if (CpuFeatures::IsSupported(LZCNT)) {
2160 CpuFeatureScope scope(this, LZCNT);
2161 lzcntq(dst, src);
2162 return;
2163 }
2164 Label not_zero_src;
2165 bsrq(dst, src);
2166 j(not_zero, &not_zero_src, Label::kNear);
2167 Move(dst, 127); // 127^63 == 64
2168 bind(&not_zero_src);
2169 xorl(dst, Immediate(63)); // for x in [0..63], 63^x == 63 - x
2170 }
2171
2172 void TurboAssembler::Tzcntq(Register dst, Register src) {
2173 if (CpuFeatures::IsSupported(BMI1)) {
2174 CpuFeatureScope scope(this, BMI1);
2175 tzcntq(dst, src);
2176 return;
2177 }
2178 Label not_zero_src;
2179 bsfq(dst, src);
2180 j(not_zero, &not_zero_src, Label::kNear);
2181 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
2182 Move(dst, 64);
2183 bind(&not_zero_src);
2184 }
2185
2186 void TurboAssembler::Tzcntq(Register dst, Operand src) {
2187 if (CpuFeatures::IsSupported(BMI1)) {
2188 CpuFeatureScope scope(this, BMI1);
2189 tzcntq(dst, src);
2190 return;
2191 }
2192 Label not_zero_src;
2193 bsfq(dst, src);
2194 j(not_zero, &not_zero_src, Label::kNear);
2195 // Define the result of tzcnt(0) separately, because bsf(0) is undefined.
2196 Move(dst, 64);
2197 bind(&not_zero_src);
2198 }
2199
2200 void TurboAssembler::Tzcntl(Register dst, Register src) {
2201 if (CpuFeatures::IsSupported(BMI1)) {
2202 CpuFeatureScope scope(this, BMI1);
2203 tzcntl(dst, src);
2204 return;
2205 }
2206 Label not_zero_src;
2207 bsfl(dst, src);
2208 j(not_zero, &not_zero_src, Label::kNear);
2209 Move(dst, 32); // The result of tzcnt is 32 if src = 0.
2210 bind(&not_zero_src);
2211 }
2212
2213 void TurboAssembler::Tzcntl(Register dst, Operand src) {
2214 if (CpuFeatures::IsSupported(BMI1)) {
2215 CpuFeatureScope scope(this, BMI1);
2216 tzcntl(dst, src);
2217 return;
2218 }
2219 Label not_zero_src;
2220 bsfl(dst, src);
2221 j(not_zero, &not_zero_src, Label::kNear);
2222 Move(dst, 32); // The result of tzcnt is 32 if src = 0.
2223 bind(&not_zero_src);
2224 }
2225
2226 void TurboAssembler::Popcntl(Register dst, Register src) {
2227 if (CpuFeatures::IsSupported(POPCNT)) {
2228 CpuFeatureScope scope(this, POPCNT);
2229 popcntl(dst, src);
2230 return;
2231 }
2232 UNREACHABLE();
2233 }
2234
2235 void TurboAssembler::Popcntl(Register dst, Operand src) {
2236 if (CpuFeatures::IsSupported(POPCNT)) {
2237 CpuFeatureScope scope(this, POPCNT);
2238 popcntl(dst, src);
2239 return;
2240 }
2241 UNREACHABLE();
2242 }
2243
2244 void TurboAssembler::Popcntq(Register dst, Register src) {
2245 if (CpuFeatures::IsSupported(POPCNT)) {
2246 CpuFeatureScope scope(this, POPCNT);
2247 popcntq(dst, src);
2248 return;
2249 }
2250 UNREACHABLE();
2251 }
2252
2253 void TurboAssembler::Popcntq(Register dst, Operand src) {
2254 if (CpuFeatures::IsSupported(POPCNT)) {
2255 CpuFeatureScope scope(this, POPCNT);
2256 popcntq(dst, src);
2257 return;
2258 }
2259 UNREACHABLE();
2260 }
2261
2262 void MacroAssembler::PushStackHandler() {
2263 // Adjust this code if not the case.
2264 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
2265 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2266
2267 Push(Immediate(0)); // Padding.
2268
2269 // Link the current handler as the next handler.
2270 ExternalReference handler_address =
2271 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
2272 Push(ExternalReferenceAsOperand(handler_address));
2273
2274 // Set this new handler as the current one.
2275 movq(ExternalReferenceAsOperand(handler_address), rsp);
2276 }
2277
2278 void MacroAssembler::PopStackHandler() {
2279 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2280 ExternalReference handler_address =
2281 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate());
2282 Pop(ExternalReferenceAsOperand(handler_address));
2283 addq(rsp, Immediate(StackHandlerConstants::kSize - kSystemPointerSize));
2284 }
2285
2286 void TurboAssembler::Ret() { ret(0); }
2287
2288 void TurboAssembler::Ret(int bytes_dropped, Register scratch) {
2289 if (is_uint16(bytes_dropped)) {
2290 ret(bytes_dropped);
2291 } else {
2292 PopReturnAddressTo(scratch);
2293 addq(rsp, Immediate(bytes_dropped));
2294 PushReturnAddressFrom(scratch);
2295 ret(0);
2296 }
2297 }
2298
2299 void TurboAssembler::IncsspqIfSupported(Register number_of_words,
2300 Register scratch) {
2301 // Optimized code can validate at runtime whether the cpu supports the
2302 // incsspq instruction, so it shouldn't use this method.
2303 CHECK(isolate()->IsGeneratingEmbeddedBuiltins());
2304 DCHECK_NE(number_of_words, scratch);
2305 Label not_supported;
2306 ExternalReference supports_cetss =
2307 ExternalReference::supports_cetss_address();
2308 Operand supports_cetss_operand =
2309 ExternalReferenceAsOperand(supports_cetss, scratch);
2310 cmpb(supports_cetss_operand, Immediate(0));
2311 j(equal, &not_supported, Label::kNear);
2312 incsspq(number_of_words);
2313 bind(&not_supported);
2314 }
2315
2316 void MacroAssembler::CmpObjectType(Register heap_object, InstanceType type,
2317 Register map) {
2318 LoadMap(map, heap_object);
2319 CmpInstanceType(map, type);
2320 }
2321
2322 void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
2323 cmpw(FieldOperand(map, Map::kInstanceTypeOffset), Immediate(type));
2324 }
2325
2326 void MacroAssembler::CmpInstanceTypeRange(Register map,
2327 Register instance_type_out,
2328 InstanceType lower_limit,
2329 InstanceType higher_limit) {
2330 DCHECK_LT(lower_limit, higher_limit);
2331 movzxwl(instance_type_out, FieldOperand(map, Map::kInstanceTypeOffset));
2332 CompareRange(instance_type_out, lower_limit, higher_limit);
2333 }
2334
2335 void TurboAssembler::AssertNotSmi(Register object) {
2336 if (!FLAG_debug_code) return;
2337 ASM_CODE_COMMENT(this);
2338 Condition is_smi = CheckSmi(object);
2339 Check(NegateCondition(is_smi), AbortReason::kOperandIsASmi);
2340 }
2341
2342 void TurboAssembler::AssertSmi(Register object) {
2343 if (!FLAG_debug_code) return;
2344 ASM_CODE_COMMENT(this);
2345 Condition is_smi = CheckSmi(object);
2346 Check(is_smi, AbortReason::kOperandIsNotASmi);
2347 }
2348
2349 void TurboAssembler::AssertSmi(Operand object) {
2350 if (!FLAG_debug_code) return;
2351 ASM_CODE_COMMENT(this);
2352 Condition is_smi = CheckSmi(object);
2353 Check(is_smi, AbortReason::kOperandIsNotASmi);
2354 }
2355
2356 void TurboAssembler::AssertZeroExtended(Register int32_register) {
2357 if (!FLAG_debug_code) return;
2358 ASM_CODE_COMMENT(this);
2359 DCHECK_NE(int32_register, kScratchRegister);
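// The register is properly zero-extended iff its value is strictly below
// 2^32, i.e. iff kScratchRegister (holding 2^32) compares above it.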
2360 movq(kScratchRegister, int64_t{0x0000000100000000});
2361 cmpq(kScratchRegister, int32_register);
2362 Check(above, AbortReason::k32BitValueInRegisterIsNotZeroExtended);
2363 }
2364
2365 void MacroAssembler::AssertCodeT(Register object) {
2366 if (!FLAG_debug_code) return;
2367 ASM_CODE_COMMENT(this);
2368 testb(object, Immediate(kSmiTagMask));
2369 Check(not_equal, AbortReason::kOperandIsNotACodeT);
2370 Push(object);
2371 LoadMap(object, object);
2372 CmpInstanceType(object, CODET_TYPE);
2373 Pop(object);
2374 Check(equal, AbortReason::kOperandIsNotACodeT);
2375 }
2376
2377 void MacroAssembler::AssertConstructor(Register object) {
2378 if (!FLAG_debug_code) return;
2379 ASM_CODE_COMMENT(this);
2380 testb(object, Immediate(kSmiTagMask));
2381 Check(not_equal, AbortReason::kOperandIsASmiAndNotAConstructor);
2382 Push(object);
2383 LoadMap(object, object);
2384 testb(FieldOperand(object, Map::kBitFieldOffset),
2385 Immediate(Map::Bits1::IsConstructorBit::kMask));
2386 Pop(object);
2387 Check(not_zero, AbortReason::kOperandIsNotAConstructor);
2388 }
2389
2390 void MacroAssembler::AssertFunction(Register object) {
2391 if (!FLAG_debug_code) return;
2392 ASM_CODE_COMMENT(this);
2393 testb(object, Immediate(kSmiTagMask));
2394 Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
2395 Push(object);
2396 LoadMap(object, object);
2397 CmpInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE,
2398 LAST_JS_FUNCTION_TYPE);
2399 Pop(object);
2400 Check(below_equal, AbortReason::kOperandIsNotAFunction);
2401 }
2402
2403 void MacroAssembler::AssertCallableFunction(Register object) {
2404 if (!FLAG_debug_code) return;
2405 ASM_CODE_COMMENT(this);
2406 testb(object, Immediate(kSmiTagMask));
2407 Check(not_equal, AbortReason::kOperandIsASmiAndNotAFunction);
2408 Push(object);
2409 LoadMap(object, object);
2410 CmpInstanceTypeRange(object, object, FIRST_CALLABLE_JS_FUNCTION_TYPE,
2411 LAST_CALLABLE_JS_FUNCTION_TYPE);
2412 Pop(object);
2413 Check(below_equal, AbortReason::kOperandIsNotACallableFunction);
2414 }
2415
2416 void MacroAssembler::AssertBoundFunction(Register object) {
2417 if (!FLAG_debug_code) return;
2418 ASM_CODE_COMMENT(this);
2419 testb(object, Immediate(kSmiTagMask));
2420 Check(not_equal, AbortReason::kOperandIsASmiAndNotABoundFunction);
2421 Push(object);
2422 CmpObjectType(object, JS_BOUND_FUNCTION_TYPE, object);
2423 Pop(object);
2424 Check(equal, AbortReason::kOperandIsNotABoundFunction);
2425 }
2426
2427 void MacroAssembler::AssertGeneratorObject(Register object) {
2428 if (!FLAG_debug_code) return;
2429 ASM_CODE_COMMENT(this);
2430 testb(object, Immediate(kSmiTagMask));
2431 Check(not_equal, AbortReason::kOperandIsASmiAndNotAGeneratorObject);
2432
2433 // Load map
2434 Register map = object;
2435 Push(object);
2436 LoadMap(map, object);
2437
2438 Label do_check;
2439 // Check if JSGeneratorObject
2440 CmpInstanceType(map, JS_GENERATOR_OBJECT_TYPE);
2441 j(equal, &do_check);
2442
2443 // Check if JSAsyncFunctionObject
2444 CmpInstanceType(map, JS_ASYNC_FUNCTION_OBJECT_TYPE);
2445 j(equal, &do_check);
2446
2447 // Check if JSAsyncGeneratorObject
2448 CmpInstanceType(map, JS_ASYNC_GENERATOR_OBJECT_TYPE);
2449
2450 bind(&do_check);
2451 // Restore generator object to register and perform assertion
2452 Pop(object);
2453 Check(equal, AbortReason::kOperandIsNotAGeneratorObject);
2454 }
2455
2456 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
2457 if (!FLAG_debug_code) return;
2458 ASM_CODE_COMMENT(this);
2459 Label done_checking;
2460 AssertNotSmi(object);
2461 Cmp(object, isolate()->factory()->undefined_value());
2462 j(equal, &done_checking);
2463 Register map = object;
2464 Push(object);
2465 LoadMap(map, object);
2466 Cmp(map, isolate()->factory()->allocation_site_map());
2467 Pop(object);
2468 Assert(equal, AbortReason::kExpectedUndefinedOrCell);
2469 bind(&done_checking);
2470 }
2471
2472 void MacroAssembler::LoadWeakValue(Register in_out, Label* target_if_cleared) {
2473 cmpl(in_out, Immediate(kClearedWeakHeapObjectLower32));
2474 j(equal, target_if_cleared);
2475
2476 andq(in_out, Immediate(~static_cast<int32_t>(kWeakHeapObjectMask)));
2477 }
2478
2479 void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value) {
2480 DCHECK_GT(value, 0);
2481 if (FLAG_native_code_counters && counter->Enabled()) {
2482 ASM_CODE_COMMENT(this);
2483 Operand counter_operand =
2484 ExternalReferenceAsOperand(ExternalReference::Create(counter));
2485 // This operation has to be exactly 32-bit wide in case the external
2486 // reference table redirects the counter to a uint32_t dummy_stats_counter_
2487 // field.
2488 if (value == 1) {
2489 incl(counter_operand);
2490 } else {
2491 addl(counter_operand, Immediate(value));
2492 }
2493 }
2494 }
2495
2496 void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value) {
2497 DCHECK_GT(value, 0);
2498 if (FLAG_native_code_counters && counter->Enabled()) {
2499 ASM_CODE_COMMENT(this);
2500 Operand counter_operand =
2501 ExternalReferenceAsOperand(ExternalReference::Create(counter));
2502 // This operation has to be exactly 32-bit wide in case the external
2503 // reference table redirects the counter to a uint32_t dummy_stats_counter_
2504 // field.
2505 if (value == 1) {
2506 decl(counter_operand);
2507 } else {
2508 subl(counter_operand, Immediate(value));
2509 }
2510 }
2511 }
2512
2513 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2514 Register actual_parameter_count,
2515 InvokeType type) {
2516 ASM_CODE_COMMENT(this);
2517 LoadTaggedPointerField(
2518 rbx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
2519 movzxwq(rbx,
2520 FieldOperand(rbx, SharedFunctionInfo::kFormalParameterCountOffset));
2521
2522 InvokeFunction(function, new_target, rbx, actual_parameter_count, type);
2523 }
2524
2525 void MacroAssembler::InvokeFunction(Register function, Register new_target,
2526 Register expected_parameter_count,
2527 Register actual_parameter_count,
2528 InvokeType type) {
2529 DCHECK_EQ(function, rdi);
2530 LoadTaggedPointerField(rsi,
2531 FieldOperand(function, JSFunction::kContextOffset));
2532 InvokeFunctionCode(rdi, new_target, expected_parameter_count,
2533 actual_parameter_count, type);
2534 }
2535
2536 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2537 Register expected_parameter_count,
2538 Register actual_parameter_count,
2539 InvokeType type) {
2540 ASM_CODE_COMMENT(this);
2541 // You can't call a function without a valid frame.
2542 DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
2543 DCHECK_EQ(function, rdi);
2544 DCHECK_IMPLIES(new_target.is_valid(), new_target == rdx);
2545
2546 // On function call, call into the debugger if necessary.
2547 Label debug_hook, continue_after_hook;
2548 {
2549 ExternalReference debug_hook_active =
2550 ExternalReference::debug_hook_on_function_call_address(isolate());
2551 Operand debug_hook_active_operand =
2552 ExternalReferenceAsOperand(debug_hook_active);
2553 cmpb(debug_hook_active_operand, Immediate(0));
2554 j(not_equal, &debug_hook);
2555 }
2556 bind(&continue_after_hook);
2557
2558 // Clear the new.target register if not given.
2559 if (!new_target.is_valid()) {
2560 LoadRoot(rdx, RootIndex::kUndefinedValue);
2561 }
2562
2563 Label done;
2564 InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
2565 // We call indirectly through the code field in the function to
2566 // allow recompilation to take effect without changing any of the
2567 // call sites.
2568 static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
2569 LoadTaggedPointerField(rcx, FieldOperand(function, JSFunction::kCodeOffset));
2570 switch (type) {
2571 case InvokeType::kCall:
2572 CallCodeTObject(rcx);
2573 break;
2574 case InvokeType::kJump:
2575 JumpCodeTObject(rcx);
2576 break;
2577 }
2578 jmp(&done, Label::kNear);
2579
2580 // Deferred debug hook.
2581 bind(&debug_hook);
2582 CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
2583 actual_parameter_count);
2584 jmp(&continue_after_hook);
2585
2586 bind(&done);
2587 }
2588
2589 Operand MacroAssembler::StackLimitAsOperand(StackLimitKind kind) {
2590 DCHECK(root_array_available());
2591 Isolate* isolate = this->isolate();
2592 ExternalReference limit =
2593 kind == StackLimitKind::kRealStackLimit
2594 ? ExternalReference::address_of_real_jslimit(isolate)
2595 : ExternalReference::address_of_jslimit(isolate);
2596 DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
2597
2598 intptr_t offset =
2599 TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
2600 CHECK(is_int32(offset));
2601 return Operand(kRootRegister, static_cast<int32_t>(offset));
2602 }
2603
2604 void MacroAssembler::StackOverflowCheck(
2605 Register num_args, Label* stack_overflow,
2606 Label::Distance stack_overflow_distance) {
2607 ASM_CODE_COMMENT(this);
2608 DCHECK_NE(num_args, kScratchRegister);
2609 // Check the stack for overflow. We are not trying to catch
2610 // interruptions (e.g. debug break and preemption) here, so the "real stack
2611 // limit" is checked.
2612 movq(kScratchRegister, rsp);
2613 // Make kScratchRegister the space we have left. The stack might already be
2614 // overflowed here which will cause kScratchRegister to become negative.
2615 subq(kScratchRegister, StackLimitAsOperand(StackLimitKind::kRealStackLimit));
2616 // TODO(victorgomes): Use the ia32 approach with leaq, since it requires
2617 // fewer instructions.
2618 sarq(kScratchRegister, Immediate(kSystemPointerSizeLog2));
2619 // Check if the arguments will overflow the stack.
2620 cmpq(kScratchRegister, num_args);
2621 // Signed comparison.
2622 // TODO(victorgomes): Save some bytes in the builtins that use stack checks
2623 // by jumping to a builtin that throws the exception.
2624 j(less_equal, stack_overflow, stack_overflow_distance);
2625 }
2626
2627 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
2628 Register actual_parameter_count,
2629 Label* done, InvokeType type) {
2630 ASM_CODE_COMMENT(this);
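// If the same register was passed for both counts they are equal by
// construction; just make sure the actual count ends up in rax.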
2631 if (expected_parameter_count == actual_parameter_count) {
2632 Move(rax, actual_parameter_count);
2633 return;
2634 }
2635 Label regular_invoke;
2636 // If the expected parameter count is equal to the adaptor sentinel, no need
2637 // to push undefined values as arguments.
2638 if (kDontAdaptArgumentsSentinel != 0) {
2639 cmpl(expected_parameter_count, Immediate(kDontAdaptArgumentsSentinel));
2640 j(equal, &regular_invoke, Label::kFar);
2641 }
2642
2643 // If overapplication or if the actual argument count is equal to the
2644 // formal parameter count, no need to push extra undefined values.
2645 subq(expected_parameter_count, actual_parameter_count);
2646 j(less_equal, &regular_invoke, Label::kFar);
2647
2648 Label stack_overflow;
2649 StackOverflowCheck(expected_parameter_count, &stack_overflow);
2650
2651 // Underapplication. Move the arguments already on the stack, including the
2652 // receiver and the return address.
2653 {
2654 Label copy, check;
2655 Register src = r8, dest = rsp, num = r9, current = r11;
2656 movq(src, rsp);
2657 leaq(kScratchRegister,
2658 Operand(expected_parameter_count, times_system_pointer_size, 0));
2659 AllocateStackSpace(kScratchRegister);
2660 // Extra words are for the return address (if a jump).
2661 int extra_words =
2662 type == InvokeType::kCall ? 0 : kReturnAddressStackSlotCount;
2663
2664 leaq(num, Operand(rax, extra_words)); // Number of words to copy.
2665 Move(current, 0);
2666 // Fall-through to the loop body because there are non-zero words to copy.
2667 bind(&copy);
2668 movq(kScratchRegister,
2669 Operand(src, current, times_system_pointer_size, 0));
2670 movq(Operand(dest, current, times_system_pointer_size, 0),
2671 kScratchRegister);
2672 incq(current);
2673 bind(&check);
2674 cmpq(current, num);
2675 j(less, &copy);
2676 leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
2677 }
2678 // Fill remaining expected arguments with undefined values.
2679 LoadRoot(kScratchRegister, RootIndex::kUndefinedValue);
2680 {
2681 Label loop;
2682 bind(&loop);
2683 decq(expected_parameter_count);
2684 movq(Operand(r8, expected_parameter_count, times_system_pointer_size, 0),
2685 kScratchRegister);
2686 j(greater, &loop, Label::kNear);
2687 }
2688 jmp(&regular_invoke);
2689
2690 bind(&stack_overflow);
2691 {
2692 FrameScope frame(
2693 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
2694 CallRuntime(Runtime::kThrowStackOverflow);
2695 int3(); // This should be unreachable.
2696 }
2697 bind(&regular_invoke);
2698 }
2699
2700 void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
2701 Register expected_parameter_count,
2702 Register actual_parameter_count) {
2703 ASM_CODE_COMMENT(this);
2704 FrameScope frame(
2705 this, has_frame() ? StackFrame::NO_FRAME_TYPE : StackFrame::INTERNAL);
2706
2707 SmiTag(expected_parameter_count);
2708 Push(expected_parameter_count);
2709
2710 SmiTag(actual_parameter_count);
2711 Push(actual_parameter_count);
2712 SmiUntag(actual_parameter_count);
2713
2714 if (new_target.is_valid()) {
2715 Push(new_target);
2716 }
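// |fun| is pushed twice: the second copy and the receiver are the arguments
// to the runtime call below, while the first copy survives the call and is
// popped afterwards.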
2717 Push(fun);
2718 Push(fun);
2719 // Arguments are located 2 words below the base pointer.
2720 Operand receiver_op = Operand(rbp, kSystemPointerSize * 2);
2721 Push(receiver_op);
2722 CallRuntime(Runtime::kDebugOnFunctionCall);
2723 Pop(fun);
2724 if (new_target.is_valid()) {
2725 Pop(new_target);
2726 }
2727 Pop(actual_parameter_count);
2728 SmiUntag(actual_parameter_count);
2729 Pop(expected_parameter_count);
2730 SmiUntag(expected_parameter_count);
2731 }
2732
2733 void TurboAssembler::StubPrologue(StackFrame::Type type) {
2734 ASM_CODE_COMMENT(this);
2735 pushq(rbp); // Caller's frame pointer.
2736 movq(rbp, rsp);
2737 Push(Immediate(StackFrame::TypeToMarker(type)));
2738 }
2739
2740 void TurboAssembler::Prologue() {
2741 ASM_CODE_COMMENT(this);
2742 pushq(rbp); // Caller's frame pointer.
2743 movq(rbp, rsp);
2744 Push(kContextRegister); // Callee's context.
2745 Push(kJSFunctionRegister); // Callee's JS function.
2746 Push(kJavaScriptCallArgCountRegister); // Actual argument count.
2747 }
2748
2749 void TurboAssembler::EnterFrame(StackFrame::Type type) {
2750 ASM_CODE_COMMENT(this);
2751 pushq(rbp);
2752 movq(rbp, rsp);
2753 if (!StackFrame::IsJavaScript(type)) {
2754 Push(Immediate(StackFrame::TypeToMarker(type)));
2755 }
2756 #if V8_ENABLE_WEBASSEMBLY
2757 if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
2758 #endif // V8_ENABLE_WEBASSEMBLY
2759 }
2760
2761 void TurboAssembler::LeaveFrame(StackFrame::Type type) {
2762 ASM_CODE_COMMENT(this);
2763 // TODO(v8:11429): Consider passing BASELINE instead, and checking for
2764 // IsJSFrame or similar. Could then unify with manual frame leaves in the
2765 // interpreter too.
2766 if (FLAG_debug_code && !StackFrame::IsJavaScript(type)) {
2767 cmpq(Operand(rbp, CommonFrameConstants::kContextOrFrameTypeOffset),
2768 Immediate(StackFrame::TypeToMarker(type)));
2769 Check(equal, AbortReason::kStackFrameTypesMustMatch);
2770 }
2771 movq(rsp, rbp);
2772 popq(rbp);
2773 }
2774
2775 #if defined(V8_TARGET_OS_WIN) || defined(V8_TARGET_OS_MACOS)
2776 void TurboAssembler::AllocateStackSpace(Register bytes_scratch) {
2777 ASM_CODE_COMMENT(this);
2778 // On Windows and on macOS, we cannot increment the stack size by more than
2779 // one page (minimum page size is 4KB) without accessing at least one byte on
2780 // the page. Check this:
2781 // https://msdn.microsoft.com/en-us/library/aa227153(v=vs.60).aspx.
2782 Label check_offset;
2783 Label touch_next_page;
2784 jmp(&check_offset);
2785 bind(&touch_next_page);
2786 subq(rsp, Immediate(kStackPageSize));
2787 // Just to touch the page, before we increment further.
2788 movb(Operand(rsp, 0), Immediate(0));
2789 subq(bytes_scratch, Immediate(kStackPageSize));
2790
2791 bind(&check_offset);
2792 cmpq(bytes_scratch, Immediate(kStackPageSize));
2793 j(greater_equal, &touch_next_page);
2794
2795 subq(rsp, bytes_scratch);
2796 }
2797
2798 void TurboAssembler::AllocateStackSpace(int bytes) {
2799 ASM_CODE_COMMENT(this);
2800 DCHECK_GE(bytes, 0);
2801 while (bytes >= kStackPageSize) {
2802 subq(rsp, Immediate(kStackPageSize));
2803 movb(Operand(rsp, 0), Immediate(0));
2804 bytes -= kStackPageSize;
2805 }
2806 if (bytes == 0) return;
2807 subq(rsp, Immediate(bytes));
2808 }
2809 #endif
2810
2811 void MacroAssembler::EnterExitFramePrologue(Register saved_rax_reg,
2812 StackFrame::Type frame_type) {
2813 ASM_CODE_COMMENT(this);
2814 DCHECK(frame_type == StackFrame::EXIT ||
2815 frame_type == StackFrame::BUILTIN_EXIT);
2816
2817 // Set up the frame structure on the stack.
2818 // All constants are relative to the frame pointer of the exit frame.
2819 DCHECK_EQ(kFPOnStackSize + kPCOnStackSize,
2820 ExitFrameConstants::kCallerSPDisplacement);
2821 DCHECK_EQ(kFPOnStackSize, ExitFrameConstants::kCallerPCOffset);
2822 DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
2823 pushq(rbp);
2824 movq(rbp, rsp);
2825
2826 // Reserve room for entry stack pointer.
2827 Push(Immediate(StackFrame::TypeToMarker(frame_type)));
2828 DCHECK_EQ(-2 * kSystemPointerSize, ExitFrameConstants::kSPOffset);
2829 Push(Immediate(0)); // Saved entry sp, patched before call.
2830
2831 // Save the frame pointer and the context in top.
2832 if (saved_rax_reg != no_reg) {
2833 movq(saved_rax_reg, rax); // Backup rax in callee-save register.
2834 }
2835
2836 Store(
2837 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()),
2838 rbp);
2839 Store(ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()),
2840 rsi);
2841 Store(
2842 ExternalReference::Create(IsolateAddressId::kCFunctionAddress, isolate()),
2843 rbx);
2844 }
2845
2846 #ifdef V8_TARGET_OS_WIN
2847 static const int kRegisterPassedArguments = 4;
2848 #else
2849 static const int kRegisterPassedArguments = 6;
2850 #endif
2851
2852 void MacroAssembler::EnterExitFrameEpilogue(int arg_stack_space,
2853 bool save_doubles) {
2854 ASM_CODE_COMMENT(this);
2855 #ifdef V8_TARGET_OS_WIN
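// Windows also requires the caller to reserve shadow (home) space for the
// four register-passed arguments.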
2856 arg_stack_space += kRegisterPassedArguments;
2857 #endif
2858 // Optionally save all XMM registers.
2859 if (save_doubles) {
2860 int space = XMMRegister::kNumRegisters * kDoubleSize +
2861 arg_stack_space * kSystemPointerSize;
2862 AllocateStackSpace(space);
2863 int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2864 const RegisterConfiguration* config = RegisterConfiguration::Default();
2865 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2866 DoubleRegister reg =
2867 DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2868 Movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
2869 }
2870 } else if (arg_stack_space > 0) {
2871 AllocateStackSpace(arg_stack_space * kSystemPointerSize);
2872 }
2873
2874 // Get the required frame alignment for the OS.
2875 const int kFrameAlignment = base::OS::ActivationFrameAlignment();
2876 if (kFrameAlignment > 0) {
2877 DCHECK(base::bits::IsPowerOfTwo(kFrameAlignment));
2878 DCHECK(is_int8(kFrameAlignment));
2879 andq(rsp, Immediate(-kFrameAlignment));
2880 }
2881
2882 // Patch the saved entry sp.
2883 movq(Operand(rbp, ExitFrameConstants::kSPOffset), rsp);
2884 }
2885
2886 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles,
2887 StackFrame::Type frame_type) {
2888 ASM_CODE_COMMENT(this);
2889 Register saved_rax_reg = r12;
2890 EnterExitFramePrologue(saved_rax_reg, frame_type);
2891
2892 // Set up argv in callee-saved register r15. It is reused in LeaveExitFrame,
2893 // so it must be retained across the C-call.
2894 int offset = StandardFrameConstants::kCallerSPOffset - kSystemPointerSize;
2895 leaq(r15, Operand(rbp, saved_rax_reg, times_system_pointer_size, offset));
2896
2897 EnterExitFrameEpilogue(arg_stack_space, save_doubles);
2898 }
2899
2900 void MacroAssembler::EnterApiExitFrame(int arg_stack_space) {
2901 ASM_CODE_COMMENT(this);
2902 EnterExitFramePrologue(no_reg, StackFrame::EXIT);
2903 EnterExitFrameEpilogue(arg_stack_space, false);
2904 }
2905
2906 void MacroAssembler::LeaveExitFrame(bool save_doubles, bool pop_arguments) {
2907 ASM_CODE_COMMENT(this);
2908 // Registers:
2909 // r15 : argv
2910 if (save_doubles) {
2911 int offset = -ExitFrameConstants::kFixedFrameSizeFromFp;
2912 const RegisterConfiguration* config = RegisterConfiguration::Default();
2913 for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
2914 DoubleRegister reg =
2915 DoubleRegister::from_code(config->GetAllocatableDoubleCode(i));
2916 Movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
2917 }
2918 }
2919
2920 if (pop_arguments) {
2921 // Get the return address from the stack and restore the frame pointer.
2922 movq(rcx, Operand(rbp, kFPOnStackSize));
2923 movq(rbp, Operand(rbp, 0 * kSystemPointerSize));
2924
2925 // Drop everything up to and including the arguments and the receiver
2926 // from the caller stack.
2927 leaq(rsp, Operand(r15, 1 * kSystemPointerSize));
2928
2929 PushReturnAddressFrom(rcx);
2930 } else {
2931 // Otherwise just leave the exit frame.
2932 leave();
2933 }
2934
2935 LeaveExitFrameEpilogue();
2936 }
2937
2938 void MacroAssembler::LeaveApiExitFrame() {
2939 ASM_CODE_COMMENT(this);
2940 movq(rsp, rbp);
2941 popq(rbp);
2942
2943 LeaveExitFrameEpilogue();
2944 }
2945
2946 void MacroAssembler::LeaveExitFrameEpilogue() {
2947 ASM_CODE_COMMENT(this);
2948 // Restore current context from top and clear it in debug mode.
2949 ExternalReference context_address =
2950 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate());
2951 Operand context_operand = ExternalReferenceAsOperand(context_address);
2952 movq(rsi, context_operand);
2953 #ifdef DEBUG
2954 Move(context_operand, Context::kInvalidContext);
2955 #endif
2956
2957 // Clear the top frame.
2958 ExternalReference c_entry_fp_address =
2959 ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate());
2960 Operand c_entry_fp_operand = ExternalReferenceAsOperand(c_entry_fp_address);
2961 Move(c_entry_fp_operand, 0);
2962 }
2963
2964 void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
2965 ASM_CODE_COMMENT(this);
2966 // Load native context.
2967 LoadMap(dst, rsi);
2968 LoadTaggedPointerField(
2969 dst,
2970 FieldOperand(dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
2971 // Load value from native context.
2972 LoadTaggedPointerField(dst, Operand(dst, Context::SlotOffset(index)));
2973 }
2974
2975 int TurboAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
2976 // On Windows 64 stack slots are reserved by the caller for all arguments
2977 // including the ones passed in registers, and space is always allocated for
2978 // the four register arguments even if the function takes fewer than four
2979 // arguments.
2980 // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
2981 // and the caller does not reserve stack slots for them.
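// For example, num_arguments == 2 yields 4 slots on Windows (shadow space
// only) and 0 elsewhere; num_arguments == 8 yields 8 slots on Windows and 2
// elsewhere.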
2982 DCHECK_GE(num_arguments, 0);
2983 #ifdef V8_TARGET_OS_WIN
2984 const int kMinimumStackSlots = kRegisterPassedArguments;
2985 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
2986 return num_arguments;
2987 #else
2988 if (num_arguments < kRegisterPassedArguments) return 0;
2989 return num_arguments - kRegisterPassedArguments;
2990 #endif
2991 }
2992
2993 void TurboAssembler::PrepareCallCFunction(int num_arguments) {
2994 ASM_CODE_COMMENT(this);
2995 int frame_alignment = base::OS::ActivationFrameAlignment();
2996 DCHECK_NE(frame_alignment, 0);
2997 DCHECK_GE(num_arguments, 0);
2998
2999 // Make stack end at alignment and allocate space for arguments and old rsp.
3000 movq(kScratchRegister, rsp);
3001 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
3002 int argument_slots_on_stack =
3003 ArgumentStackSlotsForCFunctionCall(num_arguments);
3004 AllocateStackSpace((argument_slots_on_stack + 1) * kSystemPointerSize);
3005 andq(rsp, Immediate(-frame_alignment));
3006 movq(Operand(rsp, argument_slots_on_stack * kSystemPointerSize),
3007 kScratchRegister);
3008 }
3009
3010 void TurboAssembler::CallCFunction(ExternalReference function,
3011 int num_arguments) {
3012 ASM_CODE_COMMENT(this);
3013 LoadAddress(rax, function);
3014 CallCFunction(rax, num_arguments);
3015 }
3016
3017 void TurboAssembler::CallCFunction(Register function, int num_arguments) {
3018 ASM_CODE_COMMENT(this);
3019 DCHECK_LE(num_arguments, kMaxCParameters);
3020 DCHECK(has_frame());
3021 // Check stack alignment.
3022 if (FLAG_debug_code) {
3023 CheckStackAlignment();
3024 }
3025
3026 // Save the frame pointer and PC so that the stack layout remains iterable,
3027 // even without an ExitFrame which normally exists between JS and C frames.
3028 Label get_pc;
3029 DCHECK(!AreAliased(kScratchRegister, function));
3030 leaq(kScratchRegister, Operand(&get_pc, 0));
3031 bind(&get_pc);
3032
3033 // Addressing the following external references is tricky because we need
3034 // this to work in three situations:
3035 // 1. In wasm compilation, the isolate is nullptr and thus no
3036 // ExternalReference can be created, but we can construct the address
3037 // directly using the root register and a static offset.
3038 // 2. In normal JIT (and builtin) compilation, the external reference is
3039 // usually addressed through the root register, so we can use the direct
3040 // offset directly in most cases.
3041 // 3. In regexp compilation, the external reference is embedded into the reloc
3042 // info.
3043 // The solution here is to use root register offsets wherever possible in
3044 // which case we can construct it directly. When falling back to external
3045 // references we need to ensure that the scratch register does not get
3046 // accidentally overwritten. If we run into more such cases in the future, we
3047 // should implement a more general solution.
3048 if (root_array_available()) {
3049 movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_pc_offset()),
3050 kScratchRegister);
3051 movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
3052 rbp);
3053 } else {
3054 DCHECK_NOT_NULL(isolate());
3055 // Use alternative scratch register in order not to overwrite
3056 // kScratchRegister.
3057 Register scratch = r12;
3058 pushq(scratch);
3059
3060 movq(ExternalReferenceAsOperand(
3061 ExternalReference::fast_c_call_caller_pc_address(isolate()),
3062 scratch),
3063 kScratchRegister);
3064 movq(ExternalReferenceAsOperand(
3065 ExternalReference::fast_c_call_caller_fp_address(isolate())),
3066 rbp);
3067
3068 popq(scratch);
3069 }
3070
3071 call(function);
3072
3073 // We don't unset the PC; the FP is the source of truth.
3074 if (root_array_available()) {
3075 movq(Operand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()),
3076 Immediate(0));
3077 } else {
3078 DCHECK_NOT_NULL(isolate());
3079 movq(ExternalReferenceAsOperand(
3080 ExternalReference::fast_c_call_caller_fp_address(isolate())),
3081 Immediate(0));
3082 }
3083
3084 DCHECK_NE(base::OS::ActivationFrameAlignment(), 0);
3085 DCHECK_GE(num_arguments, 0);
3086 int argument_slots_on_stack =
3087 ArgumentStackSlotsForCFunctionCall(num_arguments);
3088 movq(rsp, Operand(rsp, argument_slots_on_stack * kSystemPointerSize));
3089 }
3090
3091 void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
3092 Condition cc, Label* condition_met,
3093 Label::Distance condition_met_distance) {
3094 ASM_CODE_COMMENT(this);
3095 DCHECK(cc == zero || cc == not_zero);
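// Clearing the low kPageAlignmentMask bits of the object address yields the
// start of its BasicMemoryChunk, whose flags word lives at kFlagsOffset.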
3096 if (scratch == object) {
3097 andq(scratch, Immediate(~kPageAlignmentMask));
3098 } else {
3099 movq(scratch, Immediate(~kPageAlignmentMask));
3100 andq(scratch, object);
3101 }
3102 if (mask < (1 << kBitsPerByte)) {
3103 testb(Operand(scratch, BasicMemoryChunk::kFlagsOffset),
3104 Immediate(static_cast<uint8_t>(mask)));
3105 } else {
3106 testl(Operand(scratch, BasicMemoryChunk::kFlagsOffset), Immediate(mask));
3107 }
3108 j(cc, condition_met, condition_met_distance);
3109 }
3110
3111 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
3112 Label current;
3113 bind(&current);
3114 int pc = pc_offset();
3115 // Load effective address to get the address of the current instruction.
3116 leaq(dst, Operand(&current, -pc));
3117 }
3118
3119 // Check if the code object is marked for deoptimization. If it is, then it
3120 // jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
3121 // to:
3122 // 1. read from memory the word that contains that bit, which can be found in
3123 // the flags in the referenced {CodeDataContainer} object;
3124 // 2. test kMarkedForDeoptimizationBit in those flags; and
3125 // 3. if it is not zero then it jumps to the builtin.
3126 void TurboAssembler::BailoutIfDeoptimized(Register scratch) {
3127 int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
3128 LoadTaggedPointerField(scratch,
3129 Operand(kJavaScriptCallCodeStartRegister, offset));
3130 testl(FieldOperand(scratch, CodeDataContainer::kKindSpecificFlagsOffset),
3131 Immediate(1 << Code::kMarkedForDeoptimizationBit));
3132 Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
3133 RelocInfo::CODE_TARGET, not_zero);
3134 }
3135
3136 void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
3137 DeoptimizeKind kind, Label* ret,
3138 Label*) {
3139 ASM_CODE_COMMENT(this);
3140 // Note: Assembler::call is used here on purpose to guarantee fixed-size
3141 // exits even on Atom CPUs; see TurboAssembler::Call for Atom-specific
3142 // performance tuning which emits a different instruction sequence.
3143 call(EntryFromBuiltinAsOperand(target));
3144 DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
3145 (kind == DeoptimizeKind::kLazy) ? Deoptimizer::kLazyDeoptExitSize
3146 : Deoptimizer::kEagerDeoptExitSize);
3147 }
3148
3149 void TurboAssembler::Trap() { int3(); }
3150 void TurboAssembler::DebugBreak() { int3(); }
3151
3152 } // namespace internal
3153 } // namespace v8
3154
3155 #endif // V8_TARGET_ARCH_X64
3156