1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #if V8_TARGET_ARCH_ARM64
6
7 #include "src/base/bits.h"
8 #include "src/base/division-by-constant.h"
9 #include "src/bootstrapper.h"
10 #include "src/codegen.h"
11 #include "src/debug/debug.h"
12 #include "src/register-configuration.h"
13 #include "src/runtime/runtime.h"
14
15 #include "src/arm64/frames-arm64.h"
16 #include "src/arm64/macro-assembler-arm64.h"
17
18 namespace v8 {
19 namespace internal {
20
21 // Define a fake double underscore to use with the ASM_UNIMPLEMENTED macros.
22 #define __
23
24
25 MacroAssembler::MacroAssembler(Isolate* arg_isolate, byte* buffer,
26 unsigned buffer_size,
27 CodeObjectRequired create_code_object)
28 : Assembler(arg_isolate, buffer, buffer_size),
29 generating_stub_(false),
30 #if DEBUG
31 allow_macro_instructions_(true),
32 #endif
33 has_frame_(false),
34 use_real_aborts_(true),
35 sp_(jssp),
36 tmp_list_(DefaultTmpList()),
37 fptmp_list_(DefaultFPTmpList()) {
38 if (create_code_object == CodeObjectRequired::kYes) {
39 code_object_ =
40 Handle<Object>::New(isolate()->heap()->undefined_value(), isolate());
41 }
42 }
43
44
45 CPURegList MacroAssembler::DefaultTmpList() {
46 return CPURegList(ip0, ip1);
47 }
48
49
50 CPURegList MacroAssembler::DefaultFPTmpList() {
51 return CPURegList(fp_scratch1, fp_scratch2);
52 }
53
54
55 void MacroAssembler::LogicalMacro(const Register& rd,
56 const Register& rn,
57 const Operand& operand,
58 LogicalOp op) {
59 UseScratchRegisterScope temps(this);
60
61 if (operand.NeedsRelocation(this)) {
62 Register temp = temps.AcquireX();
63 Ldr(temp, operand.immediate());
64 Logical(rd, rn, temp, op);
65
66 } else if (operand.IsImmediate()) {
67 int64_t immediate = operand.ImmediateValue();
68 unsigned reg_size = rd.SizeInBits();
69
70 // If the operation is NOT, invert the operation and immediate.
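// (For example, Bic(rd, rn, #imm) reaches this point as (AND | NOT) and is
// handled as a plain AND of the inverted immediate.)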
71 if ((op & NOT) == NOT) {
72 op = static_cast<LogicalOp>(op & ~NOT);
73 immediate = ~immediate;
74 }
75
76 // Ignore the top 32 bits of an immediate if we're moving to a W register.
77 if (rd.Is32Bits()) {
78 // Check that the top 32 bits are consistent.
79 DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
80 ((immediate >> kWRegSizeInBits) == -1));
81 immediate &= kWRegMask;
82 }
83
84 DCHECK(rd.Is64Bits() || is_uint32(immediate));
85
86 // Special cases for all set or all clear immediates.
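// For instance, And(w0, w1, 0xffffffff) reduces to Mov(w0, w1) and
// Eor(x0, x1, -1) reduces to Mvn(x0, x1).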
87 if (immediate == 0) {
88 switch (op) {
89 case AND:
90 Mov(rd, 0);
91 return;
92 case ORR: // Fall through.
93 case EOR:
94 Mov(rd, rn);
95 return;
96 case ANDS: // Fall through.
97 case BICS:
98 break;
99 default:
100 UNREACHABLE();
101 }
102 } else if ((rd.Is64Bits() && (immediate == -1L)) ||
103 (rd.Is32Bits() && (immediate == 0xffffffffL))) {
104 switch (op) {
105 case AND:
106 Mov(rd, rn);
107 return;
108 case ORR:
109 Mov(rd, immediate);
110 return;
111 case EOR:
112 Mvn(rd, rn);
113 return;
114 case ANDS: // Fall through.
115 case BICS:
116 break;
117 default:
118 UNREACHABLE();
119 }
120 }
121
122 unsigned n, imm_s, imm_r;
123 if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
124 // Immediate can be encoded in the instruction.
125 LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
126 } else {
127 // Immediate can't be encoded: synthesize using move immediate.
128 Register temp = temps.AcquireSameSizeAs(rn);
129 Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
130 if (rd.Is(csp)) {
131 // If rd is the stack pointer we cannot use it as the destination
132 // register so we use the temp register as an intermediate again.
133 Logical(temp, rn, imm_operand, op);
134 Mov(csp, temp);
135 AssertStackConsistency();
136 } else {
137 Logical(rd, rn, imm_operand, op);
138 }
139 }
140
141 } else if (operand.IsExtendedRegister()) {
142 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
143 // Add/sub extended supports shift <= 4. We want to support exactly the
144 // same modes here.
145 DCHECK(operand.shift_amount() <= 4);
146 DCHECK(operand.reg().Is64Bits() ||
147 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
148 Register temp = temps.AcquireSameSizeAs(rn);
149 EmitExtendShift(temp, operand.reg(), operand.extend(),
150 operand.shift_amount());
151 Logical(rd, rn, temp, op);
152
153 } else {
154 // The operand can be encoded in the instruction.
155 DCHECK(operand.IsShiftedRegister());
156 Logical(rd, rn, operand, op);
157 }
158 }
159
160
161 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
162 DCHECK(allow_macro_instructions_);
163 DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
164 DCHECK(!rd.IsZero());
165
166 // TODO(all) extend to support more immediates.
167 //
168 // Immediates on AArch64 can be produced using an initial value, and zero to
169 // three move-keep operations.
170 //
171 // Initial values can be generated with:
172 // 1. 64-bit move zero (movz).
173 // 2. 32-bit move inverted (movn).
174 // 3. 64-bit move inverted.
175 // 4. 32-bit orr immediate.
176 // 5. 64-bit orr immediate.
177 // Move-keep may then be used to modify each of the 16-bit half-words.
178 //
179 // The code below supports all five initial value generators, and
180 // applying move-keep operations to move-zero and move-inverted initial
181 // values.
182
183 // Try to move the immediate in one instruction, and if that fails, switch to
184 // using multiple instructions.
185 if (!TryOneInstrMoveImmediate(rd, imm)) {
186 unsigned reg_size = rd.SizeInBits();
187
188 // Generic immediate case. Imm will be represented by
189 // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
190 // A move-zero or move-inverted is generated for the first non-zero or
191 // non-0xffff immX, and a move-keep for subsequent non-zero immX.
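// Illustrative example (registers depend on the caller): for
// imm = 0x0000cafedeadbeef the all-zero top halfword is skipped and the
// expansion is roughly:
//   movz temp, #0xbeef
//   movk temp, #0xdead, lsl #16
//   movk temp, #0xcafe, lsl #32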
192
193 uint64_t ignored_halfword = 0;
194 bool invert_move = false;
195 // If the number of 0xffff halfwords is greater than the number of 0x0000
196 // halfwords, it's more efficient to use move-inverted.
197 if (CountClearHalfWords(~imm, reg_size) >
198 CountClearHalfWords(imm, reg_size)) {
199 ignored_halfword = 0xffffL;
200 invert_move = true;
201 }
202
203 // Mov instructions can't move immediate values into the stack pointer, so
204 // set up a temporary register, if needed.
205 UseScratchRegisterScope temps(this);
206 Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
207
208 // Iterate through the halfwords. Use movn/movz for the first non-ignored
209 // halfword, and movk for subsequent halfwords.
210 DCHECK((reg_size % 16) == 0);
211 bool first_mov_done = false;
212 for (int i = 0; i < (rd.SizeInBits() / 16); i++) {
213 uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
214 if (imm16 != ignored_halfword) {
215 if (!first_mov_done) {
216 if (invert_move) {
217 movn(temp, (~imm16) & 0xffffL, 16 * i);
218 } else {
219 movz(temp, imm16, 16 * i);
220 }
221 first_mov_done = true;
222 } else {
223 // Construct a wider constant.
224 movk(temp, imm16, 16 * i);
225 }
226 }
227 }
228 DCHECK(first_mov_done);
229
230 // Move the temporary if the original destination register was the stack
231 // pointer.
232 if (rd.IsSP()) {
233 mov(rd, temp);
234 AssertStackConsistency();
235 }
236 }
237 }
238
239
240 void MacroAssembler::Mov(const Register& rd,
241 const Operand& operand,
242 DiscardMoveMode discard_mode) {
243 DCHECK(allow_macro_instructions_);
244 DCHECK(!rd.IsZero());
245
246 // Provide a swap register for instructions that need to write into the
247 // system stack pointer (and can't do this inherently).
248 UseScratchRegisterScope temps(this);
249 Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
250
251 if (operand.NeedsRelocation(this)) {
252 Ldr(dst, operand.immediate());
253
254 } else if (operand.IsImmediate()) {
255 // Call the macro assembler for generic immediates.
256 Mov(dst, operand.ImmediateValue());
257
258 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
259 // Emit a shift instruction if moving a shifted register. This operation
260 // could also be achieved using an orr instruction (like orn used by Mvn),
261 // but using a shift instruction makes the disassembly clearer.
262 EmitShift(dst, operand.reg(), operand.shift(), operand.shift_amount());
263
264 } else if (operand.IsExtendedRegister()) {
265 // Emit an extend instruction if moving an extended register. This handles
266 // extend with post-shift operations, too.
267 EmitExtendShift(dst, operand.reg(), operand.extend(),
268 operand.shift_amount());
269
270 } else {
271 // Otherwise, emit a register move only if the registers are distinct, or
272 // if they are not X registers.
273 //
274 // Note that mov(w0, w0) is not a no-op because it clears the top word of
275 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
276 // registers is not required to clear the top word of the X register. In
277 // this case, the instruction is discarded.
278 //
279 // If csp is an operand, add #0 is emitted, otherwise, orr #0.
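// For example, Mov(w0, w0) emits a mov (clearing the upper 32 bits of x0)
// unless the caller passed kDiscardForSameWReg, in which case nothing is
// emitted.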
280 if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
281 (discard_mode == kDontDiscardForSameWReg))) {
282 Assembler::mov(rd, operand.reg());
283 }
284 // This case can handle writes into the system stack pointer directly.
285 dst = rd;
286 }
287
288 // Copy the result to the system stack pointer.
289 if (!dst.Is(rd)) {
290 DCHECK(rd.IsSP());
291 Assembler::mov(rd, dst);
292 }
293 }
294
295
296 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
297 DCHECK(allow_macro_instructions_);
298
299 if (operand.NeedsRelocation(this)) {
300 Ldr(rd, operand.immediate());
301 mvn(rd, rd);
302
303 } else if (operand.IsImmediate()) {
304 // Call the macro assembler for generic immediates.
305 Mov(rd, ~operand.ImmediateValue());
306
307 } else if (operand.IsExtendedRegister()) {
308 // Emit two instructions for the extend case. This differs from Mov, as
309 // the extend and invert can't be achieved in one instruction.
310 EmitExtendShift(rd, operand.reg(), operand.extend(),
311 operand.shift_amount());
312 mvn(rd, rd);
313
314 } else {
315 mvn(rd, operand);
316 }
317 }
318
319
320 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
321 DCHECK((reg_size % 8) == 0);
322 int count = 0;
323 for (unsigned i = 0; i < (reg_size / 16); i++) {
324 if ((imm & 0xffff) == 0) {
325 count++;
326 }
327 imm >>= 16;
328 }
329 return count;
330 }
331
332
333 // The movz instruction can generate immediates containing an arbitrary 16-bit
334 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
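// (Equivalently: at most one 16-bit halfword of the value may be non-zero,
// e.g. 0x0000123400000000 qualifies but 0x0000123400001234 does not.)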
335 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
336 DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
337 return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
338 }
339
340
341 // The movn instruction can generate immediates containing an arbitrary 16-bit
342 // half-word, with remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
343 bool MacroAssembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
344 return IsImmMovz(~imm, reg_size);
345 }
346
347
348 void MacroAssembler::ConditionalCompareMacro(const Register& rn,
349 const Operand& operand,
350 StatusFlags nzcv,
351 Condition cond,
352 ConditionalCompareOp op) {
353 DCHECK((cond != al) && (cond != nv));
354 if (operand.NeedsRelocation(this)) {
355 UseScratchRegisterScope temps(this);
356 Register temp = temps.AcquireX();
357 Ldr(temp, operand.immediate());
358 ConditionalCompareMacro(rn, temp, nzcv, cond, op);
359
360 } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
361 (operand.IsImmediate() &&
362 IsImmConditionalCompare(operand.ImmediateValue()))) {
363 // The immediate can be encoded in the instruction, or the operand is an
364 // unshifted register: call the assembler.
365 ConditionalCompare(rn, operand, nzcv, cond, op);
366
367 } else {
368 // The operand isn't directly supported by the instruction: perform the
369 // operation on a temporary register.
370 UseScratchRegisterScope temps(this);
371 Register temp = temps.AcquireSameSizeAs(rn);
372 Mov(temp, operand);
373 ConditionalCompare(rn, temp, nzcv, cond, op);
374 }
375 }
376
377
378 void MacroAssembler::Csel(const Register& rd,
379 const Register& rn,
380 const Operand& operand,
381 Condition cond) {
382 DCHECK(allow_macro_instructions_);
383 DCHECK(!rd.IsZero());
384 DCHECK((cond != al) && (cond != nv));
385 if (operand.IsImmediate()) {
386 // Immediate argument. Handle special cases of 0, 1 and -1 using zero
387 // register.
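// For example, Csel(x0, x1, 1, eq) becomes csinc x0, x1, xzr, eq,
// i.e. x0 = eq ? x1 : (xzr + 1).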
388 int64_t imm = operand.ImmediateValue();
389 Register zr = AppropriateZeroRegFor(rn);
390 if (imm == 0) {
391 csel(rd, rn, zr, cond);
392 } else if (imm == 1) {
393 csinc(rd, rn, zr, cond);
394 } else if (imm == -1) {
395 csinv(rd, rn, zr, cond);
396 } else {
397 UseScratchRegisterScope temps(this);
398 Register temp = temps.AcquireSameSizeAs(rn);
399 Mov(temp, imm);
400 csel(rd, rn, temp, cond);
401 }
402 } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
403 // Unshifted register argument.
404 csel(rd, rn, operand.reg(), cond);
405 } else {
406 // All other arguments.
407 UseScratchRegisterScope temps(this);
408 Register temp = temps.AcquireSameSizeAs(rn);
409 Mov(temp, operand);
410 csel(rd, rn, temp, cond);
411 }
412 }
413
414
415 bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
416 int64_t imm) {
417 unsigned n, imm_s, imm_r;
418 int reg_size = dst.SizeInBits();
419 if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
420 // Immediate can be represented in a move zero instruction. Movz can't write
421 // to the stack pointer.
422 movz(dst, imm);
423 return true;
424 } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
425 // Immediate can be represented in a move not instruction. Movn can't write
426 // to the stack pointer.
427 movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
428 return true;
429 } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
430 // Immediate can be represented in a logical orr instruction.
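// For example, a repeating bitmask pattern such as 0x0f0f0f0f0f0f0f0f is
// materialized with a single orr from the appropriate zero register.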
431 LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
432 return true;
433 }
434 return false;
435 }
436
437
438 Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
439 int64_t imm) {
440 int reg_size = dst.SizeInBits();
441
442 // Encode the immediate in a single move instruction, if possible.
443 if (TryOneInstrMoveImmediate(dst, imm)) {
444 // The move was successful; nothing to do here.
445 } else {
446 // Pre-shift the immediate to the least-significant bits of the register.
447 int shift_low = CountTrailingZeros(imm, reg_size);
448 int64_t imm_low = imm >> shift_low;
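// (Illustrative example: imm = 0x1234000 is not encodable in one instruction,
// but imm >> 14 = 0x48d fits a single movz, so the caller receives
// Operand(dst, LSL, 14) and folds the shift into its own operand.)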
449
450 // Pre-shift the immediate to the most-significant bits of the register. We
451 // insert set bits in the least-significant bits, as this creates a
452 // different immediate that may be encodable using movn or orr-immediate.
453 // If this new immediate is encodable, the set bits will be eliminated by
454 // the post shift on the following instruction.
455 int shift_high = CountLeadingZeros(imm, reg_size);
456 int64_t imm_high = (imm << shift_high) | ((1 << shift_high) - 1);
457
458 if (TryOneInstrMoveImmediate(dst, imm_low)) {
459 // The new immediate has been moved into the destination's low bits:
460 // return a new leftward-shifting operand.
461 return Operand(dst, LSL, shift_low);
462 } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
463 // The new immediate has been moved into the destination's high bits:
464 // return a new rightward-shifting operand.
465 return Operand(dst, LSR, shift_high);
466 } else {
467 // Use the generic move operation to set up the immediate.
468 Mov(dst, imm);
469 }
470 }
471 return Operand(dst);
472 }
473
474
475 void MacroAssembler::AddSubMacro(const Register& rd,
476 const Register& rn,
477 const Operand& operand,
478 FlagsUpdate S,
479 AddSubOp op) {
480 if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
481 !operand.NeedsRelocation(this) && (S == LeaveFlags)) {
482 // The instruction would be a nop. Avoid generating useless code.
483 return;
484 }
485
486 if (operand.NeedsRelocation(this)) {
487 UseScratchRegisterScope temps(this);
488 Register temp = temps.AcquireX();
489 Ldr(temp, operand.immediate());
490 AddSubMacro(rd, rn, temp, S, op);
491 } else if ((operand.IsImmediate() &&
492 !IsImmAddSub(operand.ImmediateValue())) ||
493 (rn.IsZero() && !operand.IsShiftedRegister()) ||
494 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
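// The add/sub immediate form only encodes a 12-bit value, optionally shifted
// left by 12, so e.g. Add(x0, x1, 0x123456) takes this path and materializes
// the constant (possibly pre-shifted) in a scratch register first.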
495 UseScratchRegisterScope temps(this);
496 Register temp = temps.AcquireSameSizeAs(rn);
497 if (operand.IsImmediate()) {
498 Operand imm_operand =
499 MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
500 AddSub(rd, rn, imm_operand, S, op);
501 } else {
502 Mov(temp, operand);
503 AddSub(rd, rn, temp, S, op);
504 }
505 } else {
506 AddSub(rd, rn, operand, S, op);
507 }
508 }
509
510
511 void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
512 const Register& rn,
513 const Operand& operand,
514 FlagsUpdate S,
515 AddSubWithCarryOp op) {
516 DCHECK(rd.SizeInBits() == rn.SizeInBits());
517 UseScratchRegisterScope temps(this);
518
519 if (operand.NeedsRelocation(this)) {
520 Register temp = temps.AcquireX();
521 Ldr(temp, operand.immediate());
522 AddSubWithCarryMacro(rd, rn, temp, S, op);
523
524 } else if (operand.IsImmediate() ||
525 (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
526 // Add/sub with carry (immediate or ROR shifted register.)
527 Register temp = temps.AcquireSameSizeAs(rn);
528 Mov(temp, operand);
529 AddSubWithCarry(rd, rn, temp, S, op);
530
531 } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
532 // Add/sub with carry (shifted register).
533 DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
534 DCHECK(operand.shift() != ROR);
535 DCHECK(is_uintn(operand.shift_amount(),
536 rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
537 : kWRegSizeInBitsLog2));
538 Register temp = temps.AcquireSameSizeAs(rn);
539 EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
540 AddSubWithCarry(rd, rn, temp, S, op);
541
542 } else if (operand.IsExtendedRegister()) {
543 // Add/sub with carry (extended register).
544 DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
545 // Add/sub extended supports a shift <= 4. We want to support exactly the
546 // same modes.
547 DCHECK(operand.shift_amount() <= 4);
548 DCHECK(operand.reg().Is64Bits() ||
549 ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
550 Register temp = temps.AcquireSameSizeAs(rn);
551 EmitExtendShift(temp, operand.reg(), operand.extend(),
552 operand.shift_amount());
553 AddSubWithCarry(rd, rn, temp, S, op);
554
555 } else {
556 // The addressing mode is directly supported by the instruction.
557 AddSubWithCarry(rd, rn, operand, S, op);
558 }
559 }
560
561
562 void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
563 const MemOperand& addr,
564 LoadStoreOp op) {
565 int64_t offset = addr.offset();
566 LSDataSize size = CalcLSDataSize(op);
567
568 // Check if an immediate offset fits in the immediate field of the
569 // appropriate instruction. If not, emit two instructions to perform
570 // the operation.
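// (An immediate offset is encodable either as an unsigned, size-scaled
// 12-bit field (ldr/str) or as a signed 9-bit unscaled field in the range
// -256 to 255 (ldur/stur).)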
571 if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, size) &&
572 !IsImmLSUnscaled(offset)) {
573 // Immediate offset that can't be encoded using unsigned or unscaled
574 // addressing modes.
575 UseScratchRegisterScope temps(this);
576 Register temp = temps.AcquireSameSizeAs(addr.base());
577 Mov(temp, addr.offset());
578 LoadStore(rt, MemOperand(addr.base(), temp), op);
579 } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
580 // Post-index beyond unscaled addressing range.
581 LoadStore(rt, MemOperand(addr.base()), op);
582 add(addr.base(), addr.base(), offset);
583 } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
584 // Pre-index beyond unscaled addressing range.
585 add(addr.base(), addr.base(), offset);
586 LoadStore(rt, MemOperand(addr.base()), op);
587 } else {
588 // Encodable in one load/store instruction.
589 LoadStore(rt, addr, op);
590 }
591 }
592
593 void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
594 const CPURegister& rt2,
595 const MemOperand& addr,
596 LoadStorePairOp op) {
597 // TODO(all): Should we support register offset for load-store-pair?
598 DCHECK(!addr.IsRegisterOffset());
599
600 int64_t offset = addr.offset();
601 LSDataSize size = CalcLSPairDataSize(op);
602
603 // Check if the offset fits in the immediate field of the appropriate
604 // instruction. If not, emit two instructions to perform the operation.
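// (The pair forms encode a signed 7-bit, size-scaled immediate offset.)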
605 if (IsImmLSPair(offset, size)) {
606 // Encodable in one load/store pair instruction.
607 LoadStorePair(rt, rt2, addr, op);
608 } else {
609 Register base = addr.base();
610 if (addr.IsImmediateOffset()) {
611 UseScratchRegisterScope temps(this);
612 Register temp = temps.AcquireSameSizeAs(base);
613 Add(temp, base, offset);
614 LoadStorePair(rt, rt2, MemOperand(temp), op);
615 } else if (addr.IsPostIndex()) {
616 LoadStorePair(rt, rt2, MemOperand(base), op);
617 Add(base, base, offset);
618 } else {
619 DCHECK(addr.IsPreIndex());
620 Add(base, base, offset);
621 LoadStorePair(rt, rt2, MemOperand(base), op);
622 }
623 }
624 }
625
626
627 void MacroAssembler::Load(const Register& rt,
628 const MemOperand& addr,
629 Representation r) {
630 DCHECK(!r.IsDouble());
631
632 if (r.IsInteger8()) {
633 Ldrsb(rt, addr);
634 } else if (r.IsUInteger8()) {
635 Ldrb(rt, addr);
636 } else if (r.IsInteger16()) {
637 Ldrsh(rt, addr);
638 } else if (r.IsUInteger16()) {
639 Ldrh(rt, addr);
640 } else if (r.IsInteger32()) {
641 Ldr(rt.W(), addr);
642 } else {
643 DCHECK(rt.Is64Bits());
644 Ldr(rt, addr);
645 }
646 }
647
648
649 void MacroAssembler::Store(const Register& rt,
650 const MemOperand& addr,
651 Representation r) {
652 DCHECK(!r.IsDouble());
653
654 if (r.IsInteger8() || r.IsUInteger8()) {
655 Strb(rt, addr);
656 } else if (r.IsInteger16() || r.IsUInteger16()) {
657 Strh(rt, addr);
658 } else if (r.IsInteger32()) {
659 Str(rt.W(), addr);
660 } else {
661 DCHECK(rt.Is64Bits());
662 if (r.IsHeapObject()) {
663 AssertNotSmi(rt);
664 } else if (r.IsSmi()) {
665 AssertSmi(rt);
666 }
667 Str(rt, addr);
668 }
669 }
670
671
672 bool MacroAssembler::NeedExtraInstructionsOrRegisterBranch(
673 Label *label, ImmBranchType b_type) {
674 bool need_longer_range = false;
675 // There are two situations in which we care about the offset being out of
676 // range:
677 // - The label is bound but too far away.
678 // - The label is not bound but linked, and the previous branch
679 // instruction in the chain is too far away.
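// For reference: conditional and compare-and-branch forms reach roughly
// +/-1MB and test-and-branch roughly +/-32KB, whereas an unconditional B
// reaches +/-128MB, which is why out-of-range cases fall back to a short
// inverted branch over an unconditional one.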
680 if (label->is_bound() || label->is_linked()) {
681 need_longer_range =
682 !Instruction::IsValidImmPCOffset(b_type, label->pos() - pc_offset());
683 }
684 if (!need_longer_range && !label->is_bound()) {
685 int max_reachable_pc = pc_offset() + Instruction::ImmBranchRange(b_type);
686 unresolved_branches_.insert(
687 std::pair<int, FarBranchInfo>(max_reachable_pc,
688 FarBranchInfo(pc_offset(), label)));
689 // Also maintain the next pool check.
690 next_veneer_pool_check_ =
691 Min(next_veneer_pool_check_,
692 max_reachable_pc - kVeneerDistanceCheckMargin);
693 }
694 return need_longer_range;
695 }
696
697
698 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
699 DCHECK(allow_macro_instructions_);
700 DCHECK(!rd.IsZero());
701
702 if (hint == kAdrNear) {
703 adr(rd, label);
704 return;
705 }
706
707 DCHECK(hint == kAdrFar);
708 if (label->is_bound()) {
709 int label_offset = label->pos() - pc_offset();
710 if (Instruction::IsValidPCRelOffset(label_offset)) {
711 adr(rd, label);
712 } else {
713 DCHECK(label_offset <= 0);
714 int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
715 adr(rd, min_adr_offset);
716 Add(rd, rd, label_offset - min_adr_offset);
717 }
718 } else {
719 UseScratchRegisterScope temps(this);
720 Register scratch = temps.AcquireX();
721
722 InstructionAccurateScope scope(
723 this, PatchingAssembler::kAdrFarPatchableNInstrs);
724 adr(rd, label);
725 for (int i = 0; i < PatchingAssembler::kAdrFarPatchableNNops; ++i) {
726 nop(ADR_FAR_NOP);
727 }
728 movz(scratch, 0);
729 }
730 }
731
732
733 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
734 DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
735 (bit == -1 || type >= kBranchTypeFirstUsingBit));
736 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
737 B(static_cast<Condition>(type), label);
738 } else {
739 switch (type) {
740 case always: B(label); break;
741 case never: break;
742 case reg_zero: Cbz(reg, label); break;
743 case reg_not_zero: Cbnz(reg, label); break;
744 case reg_bit_clear: Tbz(reg, bit, label); break;
745 case reg_bit_set: Tbnz(reg, bit, label); break;
746 default:
747 UNREACHABLE();
748 }
749 }
750 }
751
752
753 void MacroAssembler::B(Label* label, Condition cond) {
754 DCHECK(allow_macro_instructions_);
755 DCHECK((cond != al) && (cond != nv));
756
757 Label done;
758 bool need_extra_instructions =
759 NeedExtraInstructionsOrRegisterBranch(label, CondBranchType);
760
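// When the target may be out of range, the macro emits the inverted pattern,
// e.g. for B(&far, eq):
//   b.ne done
//   b far      // unconditional branch with a much larger range
//   done: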
761 if (need_extra_instructions) {
762 b(&done, NegateCondition(cond));
763 B(label);
764 } else {
765 b(label, cond);
766 }
767 bind(&done);
768 }
769
770
771 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
772 DCHECK(allow_macro_instructions_);
773
774 Label done;
775 bool need_extra_instructions =
776 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
777
778 if (need_extra_instructions) {
779 tbz(rt, bit_pos, &done);
780 B(label);
781 } else {
782 tbnz(rt, bit_pos, label);
783 }
784 bind(&done);
785 }
786
787
788 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
789 DCHECK(allow_macro_instructions_);
790
791 Label done;
792 bool need_extra_instructions =
793 NeedExtraInstructionsOrRegisterBranch(label, TestBranchType);
794
795 if (need_extra_instructions) {
796 tbnz(rt, bit_pos, &done);
797 B(label);
798 } else {
799 tbz(rt, bit_pos, label);
800 }
801 bind(&done);
802 }
803
804
805 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
806 DCHECK(allow_macro_instructions_);
807
808 Label done;
809 bool need_extra_instructions =
810 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
811
812 if (need_extra_instructions) {
813 cbz(rt, &done);
814 B(label);
815 } else {
816 cbnz(rt, label);
817 }
818 bind(&done);
819 }
820
821
822 void MacroAssembler::Cbz(const Register& rt, Label* label) {
823 DCHECK(allow_macro_instructions_);
824
825 Label done;
826 bool need_extra_instructions =
827 NeedExtraInstructionsOrRegisterBranch(label, CompareBranchType);
828
829 if (need_extra_instructions) {
830 cbnz(rt, &done);
831 B(label);
832 } else {
833 cbz(rt, label);
834 }
835 bind(&done);
836 }
837
838
839 // Pseudo-instructions.
840
841
842 void MacroAssembler::Abs(const Register& rd, const Register& rm,
843 Label* is_not_representable,
844 Label* is_representable) {
845 DCHECK(allow_macro_instructions_);
846 DCHECK(AreSameSizeAndType(rd, rm));
847
848 Cmp(rm, 1);
849 Cneg(rd, rm, lt);
850
851 // If the comparison sets the v flag, the input was the smallest value
852 // representable by rm, and the mathematical result of abs(rm) is not
853 // representable using two's complement.
854 if ((is_not_representable != NULL) && (is_representable != NULL)) {
855 B(is_not_representable, vs);
856 B(is_representable);
857 } else if (is_not_representable != NULL) {
858 B(is_not_representable, vs);
859 } else if (is_representable != NULL) {
860 B(is_representable, vc);
861 }
862 }
863
864
865 // Abstracted stack operations.
866
867
868 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
869 const CPURegister& src2, const CPURegister& src3) {
870 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
871
872 int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
873 int size = src0.SizeInBytes();
874
875 PushPreamble(count, size);
876 PushHelper(count, size, src0, src1, src2, src3);
877 }
878
879
880 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
881 const CPURegister& src2, const CPURegister& src3,
882 const CPURegister& src4, const CPURegister& src5,
883 const CPURegister& src6, const CPURegister& src7) {
884 DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
885
886 int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
887 int size = src0.SizeInBytes();
888
889 PushPreamble(count, size);
890 PushHelper(4, size, src0, src1, src2, src3);
891 PushHelper(count - 4, size, src4, src5, src6, src7);
892 }
893
894
895 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
896 const CPURegister& dst2, const CPURegister& dst3) {
897 // It is not valid to pop into the same register more than once in one
898 // instruction, not even into the zero register.
899 DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
900 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
901 DCHECK(dst0.IsValid());
902
903 int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
904 int size = dst0.SizeInBytes();
905
906 PopHelper(count, size, dst0, dst1, dst2, dst3);
907 PopPostamble(count, size);
908 }
909
910
911 void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
912 const CPURegister& dst2, const CPURegister& dst3,
913 const CPURegister& dst4, const CPURegister& dst5,
914 const CPURegister& dst6, const CPURegister& dst7) {
915 // It is not valid to pop into the same register more than once in one
916 // instruction, not even into the zero register.
917 DCHECK(!AreAliased(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
918 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7));
919 DCHECK(dst0.IsValid());
920
921 int count = 5 + dst5.IsValid() + dst6.IsValid() + dst7.IsValid();
922 int size = dst0.SizeInBytes();
923
924 PopHelper(4, size, dst0, dst1, dst2, dst3);
925 PopHelper(count - 4, size, dst4, dst5, dst6, dst7);
926 PopPostamble(count, size);
927 }
928
929
930 void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
931 int size = src0.SizeInBytes() + src1.SizeInBytes();
932
933 PushPreamble(size);
934 // Reserve room for src0 and push src1.
935 str(src1, MemOperand(StackPointer(), -size, PreIndex));
936 // Fill the gap with src0.
937 str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
938 }
939
940
941 void MacroAssembler::PushPopQueue::PushQueued(
942 PreambleDirective preamble_directive) {
943 if (queued_.empty()) return;
944
945 if (preamble_directive == WITH_PREAMBLE) {
946 masm_->PushPreamble(size_);
947 }
948
949 size_t count = queued_.size();
950 size_t index = 0;
951 while (index < count) {
952 // PushHelper can only handle registers with the same size and type, and it
953 // can handle only four at a time. Batch them up accordingly.
954 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
955 int batch_index = 0;
956 do {
957 batch[batch_index++] = queued_[index++];
958 } while ((batch_index < 4) && (index < count) &&
959 batch[0].IsSameSizeAndType(queued_[index]));
960
961 masm_->PushHelper(batch_index, batch[0].SizeInBytes(),
962 batch[0], batch[1], batch[2], batch[3]);
963 }
964
965 queued_.clear();
966 }
967
968
969 void MacroAssembler::PushPopQueue::PopQueued() {
970 if (queued_.empty()) return;
971
972 size_t count = queued_.size();
973 size_t index = 0;
974 while (index < count) {
975 // PopHelper can only handle registers with the same size and type, and it
976 // can handle only four at a time. Batch them up accordingly.
977 CPURegister batch[4] = {NoReg, NoReg, NoReg, NoReg};
978 int batch_index = 0;
979 do {
980 batch[batch_index++] = queued_[index++];
981 } while ((batch_index < 4) && (index < count) &&
982 batch[0].IsSameSizeAndType(queued_[index]));
983
984 masm_->PopHelper(batch_index, batch[0].SizeInBytes(),
985 batch[0], batch[1], batch[2], batch[3]);
986 }
987
988 masm_->PopPostamble(size_);
989 queued_.clear();
990 }
991
992
993 void MacroAssembler::PushCPURegList(CPURegList registers) {
994 int size = registers.RegisterSizeInBytes();
995
996 PushPreamble(registers.Count(), size);
997 // Push up to four registers at a time because if the current stack pointer is
998 // csp and reg_size is 32, registers must be pushed in blocks of four in order
999 // to maintain the 16-byte alignment for csp.
1000 while (!registers.IsEmpty()) {
1001 int count_before = registers.Count();
1002 const CPURegister& src0 = registers.PopHighestIndex();
1003 const CPURegister& src1 = registers.PopHighestIndex();
1004 const CPURegister& src2 = registers.PopHighestIndex();
1005 const CPURegister& src3 = registers.PopHighestIndex();
1006 int count = count_before - registers.Count();
1007 PushHelper(count, size, src0, src1, src2, src3);
1008 }
1009 }
1010
1011
1012 void MacroAssembler::PopCPURegList(CPURegList registers) {
1013 int size = registers.RegisterSizeInBytes();
1014
1015 // Pop up to four registers at a time because if the current stack pointer is
1016 // csp and reg_size is 32, registers must be pushed in blocks of four in
1017 // order to maintain the 16-byte alignment for csp.
1018 while (!registers.IsEmpty()) {
1019 int count_before = registers.Count();
1020 const CPURegister& dst0 = registers.PopLowestIndex();
1021 const CPURegister& dst1 = registers.PopLowestIndex();
1022 const CPURegister& dst2 = registers.PopLowestIndex();
1023 const CPURegister& dst3 = registers.PopLowestIndex();
1024 int count = count_before - registers.Count();
1025 PopHelper(count, size, dst0, dst1, dst2, dst3);
1026 }
1027 PopPostamble(registers.Count(), size);
1028 }
1029
1030
1031 void MacroAssembler::PushMultipleTimes(CPURegister src, int count) {
1032 int size = src.SizeInBytes();
1033
1034 PushPreamble(count, size);
1035
1036 if (FLAG_optimize_for_size && count > 8) {
1037 UseScratchRegisterScope temps(this);
1038 Register temp = temps.AcquireX();
1039
1040 Label loop;
1041 __ Mov(temp, count / 2);
1042 __ Bind(&loop);
1043 PushHelper(2, size, src, src, NoReg, NoReg);
1044 __ Subs(temp, temp, 1);
1045 __ B(ne, &loop);
1046
1047 count %= 2;
1048 }
1049
1050 // Push up to four registers at a time if possible because if the current
1051 // stack pointer is csp and the register size is 32, registers must be pushed
1052 // in blocks of four in order to maintain the 16-byte alignment for csp.
1053 while (count >= 4) {
1054 PushHelper(4, size, src, src, src, src);
1055 count -= 4;
1056 }
1057 if (count >= 2) {
1058 PushHelper(2, size, src, src, NoReg, NoReg);
1059 count -= 2;
1060 }
1061 if (count == 1) {
1062 PushHelper(1, size, src, NoReg, NoReg, NoReg);
1063 count -= 1;
1064 }
1065 DCHECK(count == 0);
1066 }
1067
1068
1069 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
1070 PushPreamble(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
1071
1072 UseScratchRegisterScope temps(this);
1073 Register temp = temps.AcquireSameSizeAs(count);
1074
1075 if (FLAG_optimize_for_size) {
1076 Label loop, done;
1077
1078 Subs(temp, count, 1);
1079 B(mi, &done);
1080
1081 // Push all registers individually, to save code size.
1082 Bind(&loop);
1083 Subs(temp, temp, 1);
1084 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1085 B(pl, &loop);
1086
1087 Bind(&done);
1088 } else {
1089 Label loop, leftover2, leftover1, done;
1090
1091 Subs(temp, count, 4);
1092 B(mi, &leftover2);
1093
1094 // Push groups of four first.
1095 Bind(&loop);
1096 Subs(temp, temp, 4);
1097 PushHelper(4, src.SizeInBytes(), src, src, src, src);
1098 B(pl, &loop);
1099
1100 // Push groups of two.
1101 Bind(&leftover2);
1102 Tbz(count, 1, &leftover1);
1103 PushHelper(2, src.SizeInBytes(), src, src, NoReg, NoReg);
1104
1105 // Push the last one (if required).
1106 Bind(&leftover1);
1107 Tbz(count, 0, &done);
1108 PushHelper(1, src.SizeInBytes(), src, NoReg, NoReg, NoReg);
1109
1110 Bind(&done);
1111 }
1112 }
1113
1114
1115 void MacroAssembler::PushHelper(int count, int size,
1116 const CPURegister& src0,
1117 const CPURegister& src1,
1118 const CPURegister& src2,
1119 const CPURegister& src3) {
1120 // Ensure that we don't unintentionally modify scratch or debug registers.
1121 InstructionAccurateScope scope(this);
1122
1123 DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
1124 DCHECK(size == src0.SizeInBytes());
1125
1126 // When pushing multiple registers, the store order is chosen such that
1127 // Push(a, b) is equivalent to Push(a) followed by Push(b).
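// For example, Push(a, b, c, d) leaves a at the highest address and d at the
// lowest (the new top of stack).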
1128 switch (count) {
1129 case 1:
1130 DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
1131 str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
1132 break;
1133 case 2:
1134 DCHECK(src2.IsNone() && src3.IsNone());
1135 stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
1136 break;
1137 case 3:
1138 DCHECK(src3.IsNone());
1139 stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
1140 str(src0, MemOperand(StackPointer(), 2 * size));
1141 break;
1142 case 4:
1143 // Skip over 4 * size, then fill in the gap. This allows four W registers
1144 // to be pushed using csp, whilst maintaining 16-byte alignment for csp
1145 // at all times.
1146 stp(src3, src2, MemOperand(StackPointer(), -4 * size, PreIndex));
1147 stp(src1, src0, MemOperand(StackPointer(), 2 * size));
1148 break;
1149 default:
1150 UNREACHABLE();
1151 }
1152 }
1153
1154
1155 void MacroAssembler::PopHelper(int count, int size,
1156 const CPURegister& dst0,
1157 const CPURegister& dst1,
1158 const CPURegister& dst2,
1159 const CPURegister& dst3) {
1160 // Ensure that we don't unintentionally modify scratch or debug registers.
1161 InstructionAccurateScope scope(this);
1162
1163 DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
1164 DCHECK(size == dst0.SizeInBytes());
1165
1166 // When popping multiple registers, the load order is chosen such that
1167 // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
1168 switch (count) {
1169 case 1:
1170 DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
1171 ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
1172 break;
1173 case 2:
1174 DCHECK(dst2.IsNone() && dst3.IsNone());
1175 ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
1176 break;
1177 case 3:
1178 DCHECK(dst3.IsNone());
1179 ldr(dst2, MemOperand(StackPointer(), 2 * size));
1180 ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
1181 break;
1182 case 4:
1183 // Load the higher addresses first, then load the lower addresses and
1184 // skip the whole block in the second instruction. This allows four W
1185 // registers to be popped using csp, whilst maintaining 16-byte alignment
1186 // for csp at all times.
1187 ldp(dst2, dst3, MemOperand(StackPointer(), 2 * size));
1188 ldp(dst0, dst1, MemOperand(StackPointer(), 4 * size, PostIndex));
1189 break;
1190 default:
1191 UNREACHABLE();
1192 }
1193 }
1194
1195
1196 void MacroAssembler::PushPreamble(Operand total_size) {
1197 if (csp.Is(StackPointer())) {
1198 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1199 // on entry and the total size of the specified registers must also be a
1200 // multiple of 16 bytes.
1201 if (total_size.IsImmediate()) {
1202 DCHECK((total_size.ImmediateValue() % 16) == 0);
1203 }
1204
1205 // Don't check access size for non-immediate sizes. It's difficult to do
1206 // well, and it will be caught by hardware (or the simulator) anyway.
1207 } else {
1208 // Even if the current stack pointer is not the system stack pointer (csp),
1209 // the system stack pointer will still be modified in order to comply with
1210 // ABI rules about accessing memory below the system stack pointer.
1211 BumpSystemStackPointer(total_size);
1212 }
1213 }
1214
1215
1216 void MacroAssembler::PopPostamble(Operand total_size) {
1217 if (csp.Is(StackPointer())) {
1218 // If the current stack pointer is csp, then it must be aligned to 16 bytes
1219 // on entry and the total size of the specified registers must also be a
1220 // multiple of 16 bytes.
1221 if (total_size.IsImmediate()) {
1222 DCHECK((total_size.ImmediateValue() % 16) == 0);
1223 }
1224
1225 // Don't check access size for non-immediate sizes. It's difficult to do
1226 // well, and it will be caught by hardware (or the simulator) anyway.
1227 } else if (emit_debug_code()) {
1228 // It is safe to leave csp where it is when unwinding the JavaScript stack,
1229 // but if we keep it matching StackPointer, the simulator can detect memory
1230 // accesses in the now-free part of the stack.
1231 SyncSystemStackPointer();
1232 }
1233 }
1234
1235
1236 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
1237 if (offset.IsImmediate()) {
1238 DCHECK(offset.ImmediateValue() >= 0);
1239 } else if (emit_debug_code()) {
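// For a register offset, assert at run time that it is not negative: the
// comparison below checks that xzr <= offset.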
1240 Cmp(xzr, offset);
1241 Check(le, kStackAccessBelowStackPointer);
1242 }
1243
1244 Str(src, MemOperand(StackPointer(), offset));
1245 }
1246
1247
1248 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
1249 if (offset.IsImmediate()) {
1250 DCHECK(offset.ImmediateValue() >= 0);
1251 } else if (emit_debug_code()) {
1252 Cmp(xzr, offset);
1253 Check(le, kStackAccessBelowStackPointer);
1254 }
1255
1256 Ldr(dst, MemOperand(StackPointer(), offset));
1257 }
1258
1259
1260 void MacroAssembler::PokePair(const CPURegister& src1,
1261 const CPURegister& src2,
1262 int offset) {
1263 DCHECK(AreSameSizeAndType(src1, src2));
1264 DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
1265 Stp(src1, src2, MemOperand(StackPointer(), offset));
1266 }
1267
1268
1269 void MacroAssembler::PeekPair(const CPURegister& dst1,
1270 const CPURegister& dst2,
1271 int offset) {
1272 DCHECK(AreSameSizeAndType(dst1, dst2));
1273 DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
1274 Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
1275 }
1276
1277
1278 void MacroAssembler::PushCalleeSavedRegisters() {
1279 // Ensure that the macro-assembler doesn't use any scratch registers.
1280 InstructionAccurateScope scope(this);
1281
1282 // This method must not be called unless the current stack pointer is the
1283 // system stack pointer (csp).
1284 DCHECK(csp.Is(StackPointer()));
1285
1286 MemOperand tos(csp, -2 * static_cast<int>(kXRegSize), PreIndex);
1287
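// AAPCS64 callee-saved state: x19-x28 (plus the frame pointer x29 and link
// register x30 saved here) and the low 64 bits of v8-v15 (saved as d8-d15).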
1288 stp(d14, d15, tos);
1289 stp(d12, d13, tos);
1290 stp(d10, d11, tos);
1291 stp(d8, d9, tos);
1292
1293 stp(x29, x30, tos);
1294 stp(x27, x28, tos); // x28 = jssp
1295 stp(x25, x26, tos);
1296 stp(x23, x24, tos);
1297 stp(x21, x22, tos);
1298 stp(x19, x20, tos);
1299 }
1300
1301
1302 void MacroAssembler::PopCalleeSavedRegisters() {
1303 // Ensure that the macro-assembler doesn't use any scratch registers.
1304 InstructionAccurateScope scope(this);
1305
1306 // This method must not be called unless the current stack pointer is the
1307 // system stack pointer (csp).
1308 DCHECK(csp.Is(StackPointer()));
1309
1310 MemOperand tos(csp, 2 * kXRegSize, PostIndex);
1311
1312 ldp(x19, x20, tos);
1313 ldp(x21, x22, tos);
1314 ldp(x23, x24, tos);
1315 ldp(x25, x26, tos);
1316 ldp(x27, x28, tos); // x28 = jssp
1317 ldp(x29, x30, tos);
1318
1319 ldp(d8, d9, tos);
1320 ldp(d10, d11, tos);
1321 ldp(d12, d13, tos);
1322 ldp(d14, d15, tos);
1323 }
1324
1325
1326 void MacroAssembler::AssertStackConsistency() {
1327 // Avoid emitting code when !use_real_abort() since non-real aborts cause too
1328 // much code to be generated.
1329 if (emit_debug_code() && use_real_aborts()) {
1330 if (csp.Is(StackPointer())) {
1331 // Check the alignment of csp whenever it is the current stack pointer. We
1332 // can't check the alignment of csp without using a scratch register (or
1333 // clobbering the flags), but the processor (or simulator) will abort if
1334 // it is not properly aligned during a load.
1335 ldr(xzr, MemOperand(csp, 0));
1336 }
1337 if (FLAG_enable_slow_asserts && !csp.Is(StackPointer())) {
1338 Label ok;
1339 // Check that csp <= StackPointer(), preserving all registers and NZCV.
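// The difference is computed into StackPointer() itself so that no scratch
// register is needed; repeating sub(StackPointer(), csp, StackPointer())
// restores the original value, since x == csp - (csp - x).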
1340 sub(StackPointer(), csp, StackPointer());
1341 cbz(StackPointer(), &ok); // Ok if csp == StackPointer().
1342 tbnz(StackPointer(), kXSignBit, &ok); // Ok if csp < StackPointer().
1343
1344 // Avoid generating AssertStackConsistency checks for the Push in Abort.
1345 { DontEmitDebugCodeScope dont_emit_debug_code_scope(this);
1346 // Restore StackPointer().
1347 sub(StackPointer(), csp, StackPointer());
1348 Abort(kTheCurrentStackPointerIsBelowCsp);
1349 }
1350
1351 bind(&ok);
1352 // Restore StackPointer().
1353 sub(StackPointer(), csp, StackPointer());
1354 }
1355 }
1356 }
1357
1358 void MacroAssembler::AssertCspAligned() {
1359 if (emit_debug_code() && use_real_aborts()) {
1360 // TODO(titzer): use a real assert for alignment check?
1361 UseScratchRegisterScope scope(this);
1362 Register temp = scope.AcquireX();
1363 ldr(temp, MemOperand(csp));
1364 }
1365 }
1366
1367 void MacroAssembler::AssertFPCRState(Register fpcr) {
1368 if (emit_debug_code()) {
1369 Label unexpected_mode, done;
1370 UseScratchRegisterScope temps(this);
1371 if (fpcr.IsNone()) {
1372 fpcr = temps.AcquireX();
1373 Mrs(fpcr, FPCR);
1374 }
1375
1376 // Settings left to their default values:
1377 // - Assert that flush-to-zero is not set.
1378 Tbnz(fpcr, FZ_offset, &unexpected_mode);
1379 // - Assert that the rounding mode is nearest-with-ties-to-even.
1380 STATIC_ASSERT(FPTieEven == 0);
1381 Tst(fpcr, RMode_mask);
1382 B(eq, &done);
1383
1384 Bind(&unexpected_mode);
1385 Abort(kUnexpectedFPCRMode);
1386
1387 Bind(&done);
1388 }
1389 }
1390
1391
1392 void MacroAssembler::CanonicalizeNaN(const FPRegister& dst,
1393 const FPRegister& src) {
1394 AssertFPCRState();
1395
1396 // Subtracting 0.0 preserves all inputs except for signalling NaNs, which
1397 // become quiet NaNs. We use fsub rather than fadd because fsub preserves -0.0
1398 // inputs: -0.0 + 0.0 = 0.0, but -0.0 - 0.0 = -0.0.
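// For example, assuming default NaN propagation (FPCR.DN clear), the
// signalling NaN 0x7ff0000000000001 comes out as the quiet NaN
// 0x7ff8000000000001.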
1399 Fsub(dst, src, fp_zero);
1400 }
1401
1402
1403 void MacroAssembler::LoadRoot(CPURegister destination,
1404 Heap::RootListIndex index) {
1405 // TODO(jbramley): Most root values are constants, and can be synthesized
1406 // without a load. Refer to the ARM back end for details.
1407 Ldr(destination, MemOperand(root, index << kPointerSizeLog2));
1408 }
1409
1410
1411 void MacroAssembler::StoreRoot(Register source,
1412 Heap::RootListIndex index) {
1413 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
1414 Str(source, MemOperand(root, index << kPointerSizeLog2));
1415 }
1416
1417
1418 void MacroAssembler::LoadTrueFalseRoots(Register true_root,
1419 Register false_root) {
1420 STATIC_ASSERT((Heap::kTrueValueRootIndex + 1) == Heap::kFalseValueRootIndex);
1421 Ldp(true_root, false_root,
1422 MemOperand(root, Heap::kTrueValueRootIndex << kPointerSizeLog2));
1423 }
1424
1425
1426 void MacroAssembler::LoadHeapObject(Register result,
1427 Handle<HeapObject> object) {
1428 Mov(result, Operand(object));
1429 }
1430
1431
1432 void MacroAssembler::LoadInstanceDescriptors(Register map,
1433 Register descriptors) {
1434 Ldr(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
1435 }
1436
1437
1438 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
1439 Ldr(dst, FieldMemOperand(map, Map::kBitField3Offset));
1440 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
1441 }
1442
1443
1444 void MacroAssembler::EnumLengthUntagged(Register dst, Register map) {
1445 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
1446 Ldrsw(dst, FieldMemOperand(map, Map::kBitField3Offset));
1447 And(dst, dst, Map::EnumLengthBits::kMask);
1448 }
1449
1450
1451 void MacroAssembler::EnumLengthSmi(Register dst, Register map) {
1452 EnumLengthUntagged(dst, map);
1453 SmiTag(dst, dst);
1454 }
1455
1456
1457 void MacroAssembler::LoadAccessor(Register dst, Register holder,
1458 int accessor_index,
1459 AccessorComponent accessor) {
1460 Ldr(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
1461 LoadInstanceDescriptors(dst, dst);
1462 Ldr(dst,
1463 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
1464 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset
1465 : AccessorPair::kSetterOffset;
1466 Ldr(dst, FieldMemOperand(dst, offset));
1467 }
1468
1469
1470 void MacroAssembler::CheckEnumCache(Register object, Register scratch0,
1471 Register scratch1, Register scratch2,
1472 Register scratch3, Register scratch4,
1473 Label* call_runtime) {
1474 DCHECK(!AreAliased(object, scratch0, scratch1, scratch2, scratch3, scratch4));
1475
1476 Register empty_fixed_array_value = scratch0;
1477 Register current_object = scratch1;
1478 Register null_value = scratch4;
1479
1480 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
1481 Label next, start;
1482
1483 Mov(current_object, object);
1484
1485 // Check if the enum length field is properly initialized, indicating that
1486 // there is an enum cache.
1487 Register map = scratch2;
1488 Register enum_length = scratch3;
1489 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1490
1491 EnumLengthUntagged(enum_length, map);
1492 Cmp(enum_length, kInvalidEnumCacheSentinel);
1493 B(eq, call_runtime);
1494
1495 LoadRoot(null_value, Heap::kNullValueRootIndex);
1496 B(&start);
1497
1498 Bind(&next);
1499 Ldr(map, FieldMemOperand(current_object, HeapObject::kMapOffset));
1500
1501 // For all objects but the receiver, check that the cache is empty.
1502 EnumLengthUntagged(enum_length, map);
1503 Cbnz(enum_length, call_runtime);
1504
1505 Bind(&start);
1506
1507 // Check that there are no elements. Register current_object contains the
1508 // current JS object we've reached through the prototype chain.
1509 Label no_elements;
1510 Ldr(current_object, FieldMemOperand(current_object,
1511 JSObject::kElementsOffset));
1512 Cmp(current_object, empty_fixed_array_value);
1513 B(eq, &no_elements);
1514
1515 // Second chance, the object may be using the empty slow element dictionary.
1516 CompareRoot(current_object, Heap::kEmptySlowElementDictionaryRootIndex);
1517 B(ne, call_runtime);
1518
1519 Bind(&no_elements);
1520 Ldr(current_object, FieldMemOperand(map, Map::kPrototypeOffset));
1521 Cmp(current_object, null_value);
1522 B(ne, &next);
1523 }
1524
1525
1526 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver,
1527 Register scratch1,
1528 Register scratch2,
1529 Label* no_memento_found) {
1530 Label map_check;
1531 Label top_check;
1532 ExternalReference new_space_allocation_top_adr =
1533 ExternalReference::new_space_allocation_top_address(isolate());
1534 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
1535 const int kMementoLastWordOffset =
1536 kMementoMapOffset + AllocationMemento::kSize - kPointerSize;
1537
1538 // Bail out if the object is not in new space.
1539 JumpIfNotInNewSpace(receiver, no_memento_found);
1540 Add(scratch1, receiver, kMementoLastWordOffset);
1541 // If the object is in new space, we need to check whether it is on the same
1542 // page as the current top.
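// (The Eor/Tst pair below checks page identity: exclusive-oring the two
// addresses and testing the bits above the page offset yields zero exactly
// when both lie in the same page.)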
1543 Mov(scratch2, new_space_allocation_top_adr);
1544 Ldr(scratch2, MemOperand(scratch2));
1545 Eor(scratch2, scratch1, scratch2);
1546 Tst(scratch2, ~Page::kPageAlignmentMask);
1547 B(eq, &top_check);
1548 // The object is on a different page than allocation top. Bail out if the
1549 // object sits on the page boundary as no memento can follow and we cannot
1550 // touch the memory following it.
1551 Eor(scratch2, scratch1, receiver);
1552 Tst(scratch2, ~Page::kPageAlignmentMask);
1553 B(ne, no_memento_found);
1554 // Continue with the actual map check.
1555 jmp(&map_check);
1556 // If top is on the same page as the current object, we need to check whether
1557 // we are below top.
1558 bind(&top_check);
1559 Mov(scratch2, new_space_allocation_top_adr);
1560 Ldr(scratch2, MemOperand(scratch2));
1561 Cmp(scratch1, scratch2);
1562 B(ge, no_memento_found);
1563 // Memento map check.
1564 bind(&map_check);
1565 Ldr(scratch1, MemOperand(receiver, kMementoMapOffset));
1566 Cmp(scratch1, Operand(isolate()->factory()->allocation_memento_map()));
1567 }
1568
1569
1570 void MacroAssembler::InNewSpace(Register object,
1571 Condition cond,
1572 Label* branch) {
1573 DCHECK(cond == eq || cond == ne);
1574 UseScratchRegisterScope temps(this);
1575 CheckPageFlag(object, temps.AcquireSameSizeAs(object),
1576 MemoryChunk::kIsInNewSpaceMask, cond, branch);
1577 }
1578
1579
1580 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
1581 if (emit_debug_code()) {
1582 STATIC_ASSERT(kSmiTag == 0);
1583 Tst(object, kSmiTagMask);
1584 Check(eq, reason);
1585 }
1586 }
1587
1588
1589 void MacroAssembler::AssertNotSmi(Register object, BailoutReason reason) {
1590 if (emit_debug_code()) {
1591 STATIC_ASSERT(kSmiTag == 0);
1592 Tst(object, kSmiTagMask);
1593 Check(ne, reason);
1594 }
1595 }
1596
1597
1598 void MacroAssembler::AssertName(Register object) {
1599 if (emit_debug_code()) {
1600 AssertNotSmi(object, kOperandIsASmiAndNotAName);
1601
1602 UseScratchRegisterScope temps(this);
1603 Register temp = temps.AcquireX();
1604
1605 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1606 CompareInstanceType(temp, temp, LAST_NAME_TYPE);
1607 Check(ls, kOperandIsNotAName);
1608 }
1609 }
1610
1611
1612 void MacroAssembler::AssertFunction(Register object) {
1613 if (emit_debug_code()) {
1614 AssertNotSmi(object, kOperandIsASmiAndNotAFunction);
1615
1616 UseScratchRegisterScope temps(this);
1617 Register temp = temps.AcquireX();
1618
1619 CompareObjectType(object, temp, temp, JS_FUNCTION_TYPE);
1620 Check(eq, kOperandIsNotAFunction);
1621 }
1622 }
1623
1624
1625 void MacroAssembler::AssertBoundFunction(Register object) {
1626 if (emit_debug_code()) {
1627 AssertNotSmi(object, kOperandIsASmiAndNotABoundFunction);
1628
1629 UseScratchRegisterScope temps(this);
1630 Register temp = temps.AcquireX();
1631
1632 CompareObjectType(object, temp, temp, JS_BOUND_FUNCTION_TYPE);
1633 Check(eq, kOperandIsNotABoundFunction);
1634 }
1635 }
1636
1637 void MacroAssembler::AssertGeneratorObject(Register object) {
1638 if (emit_debug_code()) {
1639 AssertNotSmi(object, kOperandIsASmiAndNotAGeneratorObject);
1640
1641 UseScratchRegisterScope temps(this);
1642 Register temp = temps.AcquireX();
1643
1644 CompareObjectType(object, temp, temp, JS_GENERATOR_OBJECT_TYPE);
1645 Check(eq, kOperandIsNotAGeneratorObject);
1646 }
1647 }
1648
1649 void MacroAssembler::AssertReceiver(Register object) {
1650 if (emit_debug_code()) {
1651 AssertNotSmi(object, kOperandIsASmiAndNotAReceiver);
1652
1653 UseScratchRegisterScope temps(this);
1654 Register temp = temps.AcquireX();
1655
1656 STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
1657 CompareObjectType(object, temp, temp, FIRST_JS_RECEIVER_TYPE);
1658 Check(hs, kOperandIsNotAReceiver);
1659 }
1660 }
1661
1662
1663 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1664 Register scratch) {
1665 if (emit_debug_code()) {
1666 Label done_checking;
1667 AssertNotSmi(object);
1668 JumpIfRoot(object, Heap::kUndefinedValueRootIndex, &done_checking);
1669 Ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1670 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
1671 Assert(eq, kExpectedUndefinedOrCell);
1672 Bind(&done_checking);
1673 }
1674 }
1675
1676
1677 void MacroAssembler::AssertString(Register object) {
1678 if (emit_debug_code()) {
1679 UseScratchRegisterScope temps(this);
1680 Register temp = temps.AcquireX();
1681 STATIC_ASSERT(kSmiTag == 0);
1682 Tst(object, kSmiTagMask);
1683 Check(ne, kOperandIsASmiAndNotAString);
1684 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
1685 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
1686 Check(lo, kOperandIsNotAString);
1687 }
1688 }
1689
1690
1691 void MacroAssembler::AssertPositiveOrZero(Register value) {
1692 if (emit_debug_code()) {
1693 Label done;
1694 int sign_bit = value.Is64Bits() ? kXSignBit : kWSignBit;
1695 Tbz(value, sign_bit, &done);
1696 Abort(kUnexpectedNegativeValue);
1697 Bind(&done);
1698 }
1699 }
1700
1701 void MacroAssembler::AssertNotNumber(Register value) {
1702 if (emit_debug_code()) {
1703 STATIC_ASSERT(kSmiTag == 0);
1704 Tst(value, kSmiTagMask);
1705 Check(ne, kOperandIsANumber);
1706 Label done;
1707 JumpIfNotHeapNumber(value, &done);
1708 Abort(kOperandIsANumber);
1709 Bind(&done);
1710 }
1711 }
1712
1713 void MacroAssembler::AssertNumber(Register value) {
1714 if (emit_debug_code()) {
1715 Label done;
1716 JumpIfSmi(value, &done);
1717 JumpIfHeapNumber(value, &done);
1718 Abort(kOperandIsNotANumber);
1719 Bind(&done);
1720 }
1721 }
1722
1723 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
1724 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
1725 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
1726 }
1727
1728
1729 void MacroAssembler::TailCallStub(CodeStub* stub) {
1730 Jump(stub->GetCode(), RelocInfo::CODE_TARGET);
1731 }
1732
1733
1734 void MacroAssembler::CallRuntime(const Runtime::Function* f,
1735 int num_arguments,
1736 SaveFPRegsMode save_doubles) {
1737 // All arguments must be on the stack before this function is called.
1738 // x0 holds the return value after the call.
1739
1740 // Check that the number of arguments matches what the function expects.
1741 // If f->nargs is -1, the function can accept a variable number of arguments.
1742 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1743
1744 // Place the necessary arguments.
1745 Mov(x0, num_arguments);
1746 Mov(x1, ExternalReference(f, isolate()));
1747
1748 CEntryStub stub(isolate(), 1, save_doubles);
1749 CallStub(&stub);
1750 }
1751
1752
1753 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
1754 int num_arguments) {
1755 Mov(x0, num_arguments);
1756 Mov(x1, ext);
1757
1758 CEntryStub stub(isolate(), 1);
1759 CallStub(&stub);
1760 }
1761
1762 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1763 bool builtin_exit_frame) {
1764 Mov(x1, builtin);
1765 CEntryStub stub(isolate(), 1, kDontSaveFPRegs, kArgvOnStack,
1766 builtin_exit_frame);
1767 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1768 }
1769
1770 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1771 const Runtime::Function* function = Runtime::FunctionForId(fid);
1772 DCHECK_EQ(1, function->result_size);
1773 if (function->nargs >= 0) {
1774 // TODO(1236192): Most runtime routines don't need the number of
1775 // arguments passed in because it is constant. At some point we
1776 // should remove this need and make the runtime routine entry code
1777 // smarter.
1778 Mov(x0, function->nargs);
1779 }
1780 JumpToExternalReference(ExternalReference(fid, isolate()));
1781 }
1782
1783 int MacroAssembler::ActivationFrameAlignment() {
1784 #if V8_HOST_ARCH_ARM64
1785 // Running on the real platform. Use the alignment as mandated by the local
1786 // environment.
1787 // Note: This will break if we ever start generating snapshots on one ARM
1788 // platform for another ARM platform with a different alignment.
1789 return base::OS::ActivationFrameAlignment();
1790 #else // V8_HOST_ARCH_ARM64
1791 // If we are using the simulator then we should always align to the expected
1792 // alignment. As the simulator is used to generate snapshots we do not know
1793 // if the target platform will need alignment, so this is controlled from a
1794 // flag.
1795 return FLAG_sim_stack_alignment;
1796 #endif // V8_HOST_ARCH_ARM64
1797 }
1798
1799
1800 void MacroAssembler::CallCFunction(ExternalReference function,
1801 int num_of_reg_args) {
1802 CallCFunction(function, num_of_reg_args, 0);
1803 }
1804
1805
1806 void MacroAssembler::CallCFunction(ExternalReference function,
1807 int num_of_reg_args,
1808 int num_of_double_args) {
1809 UseScratchRegisterScope temps(this);
1810 Register temp = temps.AcquireX();
1811 Mov(temp, function);
1812 CallCFunction(temp, num_of_reg_args, num_of_double_args);
1813 }
1814
1815
1816 void MacroAssembler::CallCFunction(Register function,
1817 int num_of_reg_args,
1818 int num_of_double_args) {
1819 DCHECK(has_frame());
1820 // We can pass 8 integer arguments in registers. If we need to pass more than
1821 // that, we'll need to implement support for passing them on the stack.
1822 DCHECK(num_of_reg_args <= 8);
1823
1824 // If we're passing doubles, we're limited to the following prototypes
1825 // (defined by ExternalReference::Type):
1826 // BUILTIN_COMPARE_CALL: int f(double, double)
1827 // BUILTIN_FP_FP_CALL: double f(double, double)
1828 // BUILTIN_FP_CALL: double f(double)
1829 // BUILTIN_FP_INT_CALL: double f(double, int)
1830 if (num_of_double_args > 0) {
1831 DCHECK(num_of_reg_args <= 1);
1832 DCHECK((num_of_double_args + num_of_reg_args) <= 2);
1833 }
1834
1835
1836 // If the stack pointer is not csp, we need to derive an aligned csp from the
1837 // current stack pointer.
1838 const Register old_stack_pointer = StackPointer();
1839 if (!csp.Is(old_stack_pointer)) {
1840 AssertStackConsistency();
1841
1842 int sp_alignment = ActivationFrameAlignment();
1843 // The ABI mandates at least 16-byte alignment.
1844 DCHECK(sp_alignment >= 16);
1845 DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
1846
1847 // The current stack pointer is a callee saved register, and is preserved
1848 // across the call.
1849 DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
1850
1851 // Align and synchronize the system stack pointer with jssp.
1852 Bic(csp, old_stack_pointer, sp_alignment - 1);
1853 SetStackPointer(csp);
1854 }
1855
1856 // Call directly. The function called cannot cause a GC, or allow preemption,
1857 // so the return address in the link register stays correct.
1858 Call(function);
1859
1860 if (!csp.Is(old_stack_pointer)) {
1861 if (emit_debug_code()) {
1862 // Because the stack pointer must be aligned on a 16-byte boundary, the
1863 // aligned csp can be up to 12 bytes below the jssp. This is the case
1864 // where we only pushed one W register on top of an aligned jssp.
1865 UseScratchRegisterScope temps(this);
1866 Register temp = temps.AcquireX();
1867 DCHECK(ActivationFrameAlignment() == 16);
1868 Sub(temp, csp, old_stack_pointer);
1869 // We want temp <= 0 && temp >= -12.
1870 Cmp(temp, 0);
1871 Ccmp(temp, -12, NFlag, le);
1872 Check(ge, kTheStackWasCorruptedByMacroAssemblerCall);
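      // Rough scalar equivalent of the Cmp/Ccmp/Check sequence above
      // (illustrative only; diff is a hypothetical stand-in for temp):
      //
      //   int64_t diff = csp - jssp;
      //   bool ok = (diff <= 0) && (diff >= -12);
      //   if (!ok) Abort(kTheStackWasCorruptedByMacroAssemblerCall);
      //
      // The Ccmp performs the second comparison only when the first condition
      // (le) held; otherwise it loads NFlag so the final ge check fails.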
1873 }
1874 SetStackPointer(old_stack_pointer);
1875 }
1876 }
1877
1878
1879 void MacroAssembler::Jump(Register target) {
1880 Br(target);
1881 }
1882
1883
1884 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
1885 Condition cond) {
1886 if (cond == nv) return;
1887 UseScratchRegisterScope temps(this);
1888 Register temp = temps.AcquireX();
1889 Label done;
1890 if (cond != al) B(NegateCondition(cond), &done);
1891 Mov(temp, Operand(target, rmode));
1892 Br(temp);
1893 Bind(&done);
1894 }
1895
1896
1897 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
1898 Condition cond) {
1899 DCHECK(!RelocInfo::IsCodeTarget(rmode));
1900 Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
1901 }
1902
1903
1904 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
1905 Condition cond) {
1906 DCHECK(RelocInfo::IsCodeTarget(rmode));
1907 AllowDeferredHandleDereference embedding_raw_address;
1908 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
1909 }
1910
1911
1912 void MacroAssembler::Call(Register target) {
1913 BlockPoolsScope scope(this);
1914 #ifdef DEBUG
1915 Label start_call;
1916 Bind(&start_call);
1917 #endif
1918
1919 Blr(target);
1920
1921 #ifdef DEBUG
1922 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1923 #endif
1924 }
1925
1926
1927 void MacroAssembler::Call(Label* target) {
1928 BlockPoolsScope scope(this);
1929 #ifdef DEBUG
1930 Label start_call;
1931 Bind(&start_call);
1932 #endif
1933
1934 Bl(target);
1935
1936 #ifdef DEBUG
1937 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target));
1938 #endif
1939 }
1940
1941
1942 // MacroAssembler::CallSize is sensitive to changes in this function, as it
1943 // requires to know how many instructions are used to branch to the target.
1944 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode) {
1945 BlockPoolsScope scope(this);
1946 #ifdef DEBUG
1947 Label start_call;
1948 Bind(&start_call);
1949 #endif
1950
1951 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
1952 DCHECK(rmode != RelocInfo::NONE32);
1953
1954 UseScratchRegisterScope temps(this);
1955 Register temp = temps.AcquireX();
1956
1957 if (rmode == RelocInfo::NONE64) {
1958 // Addresses are 48 bits so we never need to load the upper 16 bits.
1959 uint64_t imm = reinterpret_cast<uint64_t>(target);
1960     // If we don't use ARM tagged addresses, the upper 16 bits must be 0.
1961 DCHECK(((imm >> 48) & 0xffff) == 0);
1962 movz(temp, (imm >> 0) & 0xffff, 0);
1963 movk(temp, (imm >> 16) & 0xffff, 16);
1964 movk(temp, (imm >> 32) & 0xffff, 32);
1965 } else {
1966 Ldr(temp, Immediate(reinterpret_cast<intptr_t>(target), rmode));
1967 }
1968 Blr(temp);
1969 #ifdef DEBUG
1970 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
1971 #endif
1972 }
1973
1974
1975 void MacroAssembler::Call(Handle<Code> code,
1976 RelocInfo::Mode rmode,
1977 TypeFeedbackId ast_id) {
1978 #ifdef DEBUG
1979 Label start_call;
1980 Bind(&start_call);
1981 #endif
1982
1983 if ((rmode == RelocInfo::CODE_TARGET) && (!ast_id.IsNone())) {
1984 SetRecordedAstId(ast_id);
1985 rmode = RelocInfo::CODE_TARGET_WITH_ID;
1986 }
1987
1988 AllowDeferredHandleDereference embedding_raw_address;
1989 Call(reinterpret_cast<Address>(code.location()), rmode);
1990
1991 #ifdef DEBUG
1992 // Check the size of the code generated.
1993 AssertSizeOfCodeGeneratedSince(&start_call, CallSize(code, rmode, ast_id));
1994 #endif
1995 }
1996
1997
1998 int MacroAssembler::CallSize(Register target) {
1999 USE(target);
2000 return kInstructionSize;
2001 }
2002
2003
2004 int MacroAssembler::CallSize(Label* target) {
2005 USE(target);
2006 return kInstructionSize;
2007 }
2008
2009
2010 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode) {
2011 USE(target);
2012
2013 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2014 DCHECK(rmode != RelocInfo::NONE32);
2015
2016 if (rmode == RelocInfo::NONE64) {
2017 return kCallSizeWithoutRelocation;
2018 } else {
2019 return kCallSizeWithRelocation;
2020 }
2021 }
2022
2023
2024 int MacroAssembler::CallSize(Handle<Code> code,
2025 RelocInfo::Mode rmode,
2026 TypeFeedbackId ast_id) {
2027 USE(code);
2028 USE(ast_id);
2029
2030 // Addresses always have 64 bits, so we shouldn't encounter NONE32.
2031 DCHECK(rmode != RelocInfo::NONE32);
2032
2033 if (rmode == RelocInfo::NONE64) {
2034 return kCallSizeWithoutRelocation;
2035 } else {
2036 return kCallSizeWithRelocation;
2037 }
2038 }
2039
2040
2041 void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
2042 SmiCheckType smi_check_type) {
2043 Label on_not_heap_number;
2044
2045 if (smi_check_type == DO_SMI_CHECK) {
2046 JumpIfSmi(object, &on_not_heap_number);
2047 }
2048
2049 AssertNotSmi(object);
2050
2051 UseScratchRegisterScope temps(this);
2052 Register temp = temps.AcquireX();
2053 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2054 JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
2055
2056 Bind(&on_not_heap_number);
2057 }
2058
2059
2060 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2061 Label* on_not_heap_number,
2062 SmiCheckType smi_check_type) {
2063 if (smi_check_type == DO_SMI_CHECK) {
2064 JumpIfSmi(object, on_not_heap_number);
2065 }
2066
2067 AssertNotSmi(object);
2068
2069 UseScratchRegisterScope temps(this);
2070 Register temp = temps.AcquireX();
2071 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
2072 JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
2073 }
2074
2075
2076 void MacroAssembler::TryRepresentDoubleAsInt(Register as_int,
2077 FPRegister value,
2078 FPRegister scratch_d,
2079 Label* on_successful_conversion,
2080 Label* on_failed_conversion) {
2081 // Convert to an int and back again, then compare with the original value.
2082 Fcvtzs(as_int, value);
2083 Scvtf(scratch_d, as_int);
2084 Fcmp(value, scratch_d);
2085
2086 if (on_successful_conversion) {
2087 B(on_successful_conversion, eq);
2088 }
2089 if (on_failed_conversion) {
2090 B(on_failed_conversion, ne);
2091 }
2092 }
2093
2094
2095 void MacroAssembler::TestForMinusZero(DoubleRegister input) {
2096 UseScratchRegisterScope temps(this);
2097 Register temp = temps.AcquireX();
2098   // The bit pattern of floating point -0.0 is INT64_MIN when read as an
2099   // integer (only the sign bit is set), so subtracting 1 (cmp) causes signed overflow.
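  // Illustrative sketch (not emitted code): reinterpreting the double's bits
  // as a signed 64-bit integer, only -0.0 yields INT64_MIN, and Cmp(temp, 1)
  // computes temp - 1, which overflows (sets V) precisely for that value:
  //
  //   int64_t bits;
  //   std::memcpy(&bits, &input_value, sizeof(bits));  // hypothetical double
  //   bool is_minus_zero = (bits == INT64_MIN);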
2100 Fmov(temp, input);
2101 Cmp(temp, 1);
2102 }
2103
2104
2105 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
2106 Label* on_negative_zero) {
2107 TestForMinusZero(input);
2108 B(vs, on_negative_zero);
2109 }
2110
2111
2112 void MacroAssembler::JumpIfMinusZero(Register input,
2113 Label* on_negative_zero) {
2114 DCHECK(input.Is64Bits());
2115 // Floating point value is in an integer register. Detect -0.0 by subtracting
2116 // 1 (cmp), which will cause overflow.
2117 Cmp(input, 1);
2118 B(vs, on_negative_zero);
2119 }
2120
2121
2122 void MacroAssembler::ClampInt32ToUint8(Register output, Register input) {
2123 // Clamp the value to [0..255].
2124 Cmp(input.W(), Operand(input.W(), UXTB));
2125 // If input < input & 0xff, it must be < 0, so saturate to 0.
2126 Csel(output.W(), wzr, input.W(), lt);
2127 // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
2128 Csel(output.W(), output.W(), 255, le);
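  // Worked examples (illustrative): for input = 300, 300 > (300 & 0xff) so the
  // first Csel keeps 300 and the second saturates to 255; for input = -5,
  // -5 < (-5 & 0xff) so the first Csel selects wzr and the result is 0; for
  // input = 100 both conditions leave the value unchanged.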
2129 }
2130
2131
2132 void MacroAssembler::ClampInt32ToUint8(Register in_out) {
2133 ClampInt32ToUint8(in_out, in_out);
2134 }
2135
2136
2137 void MacroAssembler::ClampDoubleToUint8(Register output,
2138 DoubleRegister input,
2139 DoubleRegister dbl_scratch) {
2140 // This conversion follows the WebIDL "[Clamp]" rules for PIXEL types:
2141 // - Inputs lower than 0 (including -infinity) produce 0.
2142 // - Inputs higher than 255 (including +infinity) produce 255.
2143 // Also, it seems that PIXEL types use round-to-nearest rather than
2144 // round-towards-zero.
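  // A rough scalar sketch of these semantics (illustrative only; ClampToUint8
  // is a hypothetical helper and ties round to even under the default FPCR
  // rounding mode):
  //
  //   uint8_t ClampToUint8(double d) {
  //     if (!(d > 0.0)) return 0;        // NaN, -infinity and negative inputs
  //     if (d >= 255.0) return 255;      // +infinity and large inputs
  //     return static_cast<uint8_t>(std::nearbyint(d));  // round to nearest
  //   }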
2145
2146 // Squash +infinity before the conversion, since Fcvtnu will normally
2147 // convert it to 0.
2148 Fmov(dbl_scratch, 255);
2149 Fmin(dbl_scratch, dbl_scratch, input);
2150
2151 // Convert double to unsigned integer. Values less than zero become zero.
2152 // Values greater than 255 have already been clamped to 255.
2153 Fcvtnu(output, dbl_scratch);
2154 }
2155
2156 void MacroAssembler::InitializeFieldsWithFiller(Register current_address,
2157 Register end_address,
2158 Register filler) {
2159 DCHECK(!current_address.Is(csp));
2160 UseScratchRegisterScope temps(this);
2161 Register distance_in_words = temps.AcquireX();
2162 Label done;
2163
2164 // Calculate the distance. If it's <= zero then there's nothing to do.
2165 Subs(distance_in_words, end_address, current_address);
2166 B(le, &done);
2167
2168 // There's at least one field to fill, so do this unconditionally.
2169 Str(filler, MemOperand(current_address));
2170
2171   // If distance_in_words consists of an odd number of words, advance
2172   // current_address by one word; otherwise the pairs loop below would
2173   // overwrite the field that was stored above.
2174 And(distance_in_words, distance_in_words, kPointerSize);
2175 Add(current_address, current_address, distance_in_words);
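  // Worked example (illustrative): for a distance of 40 bytes (five words),
  // 40 & kPointerSize == 8, so current_address skips past the word just
  // filled, leaving four words (two Stp pairs) for the loop below. For an even
  // number of words the And yields 0 and the first pair simply overwrites the
  // single store above, which is harmless.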
2176
2177 // Store filler to memory in pairs.
2178 Label loop, entry;
2179 B(&entry);
2180 Bind(&loop);
2181 Stp(filler, filler, MemOperand(current_address, 2 * kPointerSize, PostIndex));
2182 Bind(&entry);
2183 Cmp(current_address, end_address);
2184 B(lo, &loop);
2185
2186 Bind(&done);
2187 }
2188
2189 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
2190 Register first, Register second, Register scratch1, Register scratch2,
2191 Label* failure) {
2192 DCHECK(!AreAliased(first, second, scratch1, scratch2));
2193 const int kFlatOneByteStringMask =
2194 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
2195 const int kFlatOneByteStringTag =
2196 kStringTag | kOneByteStringTag | kSeqStringTag;
2197 And(scratch1, first, kFlatOneByteStringMask);
2198 And(scratch2, second, kFlatOneByteStringMask);
2199 Cmp(scratch1, kFlatOneByteStringTag);
2200 Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
2201 B(ne, failure);
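  // Rough scalar equivalent (illustrative only): the Ccmp above performs the
  // second comparison only if the first one set eq, and otherwise loads NoFlag
  // (clearing Z), so the branch to failure is taken unless both types match:
  //
  //   bool ok = ((first & kFlatOneByteStringMask) == kFlatOneByteStringTag) &&
  //             ((second & kFlatOneByteStringMask) == kFlatOneByteStringTag);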
2202 }
2203
2204
2205 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
2206 Label* not_unique_name) {
2207 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
2208 // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
2209 // continue
2210 // } else {
2211 // goto not_unique_name
2212 // }
2213 Tst(type, kIsNotStringMask | kIsNotInternalizedMask);
2214 Ccmp(type, SYMBOL_TYPE, ZFlag, ne);
2215 B(ne, not_unique_name);
2216 }
2217
2218 void MacroAssembler::PrepareForTailCall(const ParameterCount& callee_args_count,
2219 Register caller_args_count_reg,
2220 Register scratch0, Register scratch1) {
2221 #if DEBUG
2222 if (callee_args_count.is_reg()) {
2223 DCHECK(!AreAliased(callee_args_count.reg(), caller_args_count_reg, scratch0,
2224 scratch1));
2225 } else {
2226 DCHECK(!AreAliased(caller_args_count_reg, scratch0, scratch1));
2227 }
2228 #endif
2229
2230 // Calculate the end of destination area where we will put the arguments
2231   // after we drop the current frame. We add kPointerSize to count the receiver
2232   // argument, which is not included in the formal parameter count.
2233 Register dst_reg = scratch0;
2234 __ add(dst_reg, fp, Operand(caller_args_count_reg, LSL, kPointerSizeLog2));
2235 __ add(dst_reg, dst_reg,
2236 Operand(StandardFrameConstants::kCallerSPOffset + kPointerSize));
2237
2238 Register src_reg = caller_args_count_reg;
2239 // Calculate the end of source area. +kPointerSize is for the receiver.
2240 if (callee_args_count.is_reg()) {
2241 add(src_reg, jssp, Operand(callee_args_count.reg(), LSL, kPointerSizeLog2));
2242 add(src_reg, src_reg, Operand(kPointerSize));
2243 } else {
2244 add(src_reg, jssp,
2245 Operand((callee_args_count.immediate() + 1) * kPointerSize));
2246 }
2247
2248 if (FLAG_debug_code) {
2249 __ Cmp(src_reg, dst_reg);
2250 __ Check(lo, kStackAccessBelowStackPointer);
2251 }
2252
2253 // Restore caller's frame pointer and return address now as they will be
2254 // overwritten by the copying loop.
2255 __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
2256 __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
2257
2258 // Now copy callee arguments to the caller frame going backwards to avoid
2259 // callee arguments corruption (source and destination areas could overlap).
2260
2261 // Both src_reg and dst_reg are pointing to the word after the one to copy,
2262 // so they must be pre-decremented in the loop.
2263 Register tmp_reg = scratch1;
2264 Label loop, entry;
2265 __ B(&entry);
2266 __ bind(&loop);
2267 __ Ldr(tmp_reg, MemOperand(src_reg, -kPointerSize, PreIndex));
2268 __ Str(tmp_reg, MemOperand(dst_reg, -kPointerSize, PreIndex));
2269 __ bind(&entry);
2270 __ Cmp(jssp, src_reg);
2271 __ B(ne, &loop);
2272
2273 // Leave current frame.
2274 __ Mov(jssp, dst_reg);
2275 __ SetStackPointer(jssp);
2276 __ AssertStackConsistency();
2277 }
2278
2279 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
2280 const ParameterCount& actual,
2281 Label* done,
2282 InvokeFlag flag,
2283 bool* definitely_mismatches,
2284 const CallWrapper& call_wrapper) {
2285 bool definitely_matches = false;
2286 *definitely_mismatches = false;
2287 Label regular_invoke;
2288
2289 // Check whether the expected and actual arguments count match. If not,
2290 // setup registers according to contract with ArgumentsAdaptorTrampoline:
2291 // x0: actual arguments count.
2292 // x1: function (passed through to callee).
2293 // x2: expected arguments count.
2294
2295 // The code below is made a lot easier because the calling code already sets
2296 // up actual and expected registers according to the contract if values are
2297 // passed in registers.
2298 DCHECK(actual.is_immediate() || actual.reg().is(x0));
2299 DCHECK(expected.is_immediate() || expected.reg().is(x2));
2300
2301 if (expected.is_immediate()) {
2302 DCHECK(actual.is_immediate());
2303 Mov(x0, actual.immediate());
2304 if (expected.immediate() == actual.immediate()) {
2305 definitely_matches = true;
2306
2307 } else {
2308 if (expected.immediate() ==
2309 SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
2310 // Don't worry about adapting arguments for builtins that
2311         // don't want that done. Skip the adaptation code by making it look
2312 // like we have a match between expected and actual number of
2313 // arguments.
2314 definitely_matches = true;
2315 } else {
2316 *definitely_mismatches = true;
2317 // Set up x2 for the argument adaptor.
2318 Mov(x2, expected.immediate());
2319 }
2320 }
2321
2322 } else { // expected is a register.
2323 Operand actual_op = actual.is_immediate() ? Operand(actual.immediate())
2324 : Operand(actual.reg());
2325 Mov(x0, actual_op);
2326 // If actual == expected perform a regular invocation.
2327 Cmp(expected.reg(), actual_op);
2328     B(eq, &regular_invoke);
2329 }
2330
2331 // If the argument counts may mismatch, generate a call to the argument
2332 // adaptor.
2333 if (!definitely_matches) {
2334 Handle<Code> adaptor =
2335 isolate()->builtins()->ArgumentsAdaptorTrampoline();
2336 if (flag == CALL_FUNCTION) {
2337 call_wrapper.BeforeCall(CallSize(adaptor));
2338 Call(adaptor);
2339 call_wrapper.AfterCall();
2340 if (!*definitely_mismatches) {
2341 // If the arg counts don't match, no extra code is emitted by
2342 // MAsm::InvokeFunctionCode and we can just fall through.
2343 B(done);
2344 }
2345 } else {
2346 Jump(adaptor, RelocInfo::CODE_TARGET);
2347 }
2348 }
2349   Bind(&regular_invoke);
2350 }
2351
2352 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
2353 const ParameterCount& expected,
2354 const ParameterCount& actual) {
2355 Label skip_hook;
2356 ExternalReference debug_hook_active =
2357 ExternalReference::debug_hook_on_function_call_address(isolate());
2358 Mov(x4, Operand(debug_hook_active));
2359 Ldrsb(x4, MemOperand(x4));
2360 CompareAndBranch(x4, Operand(0), eq, &skip_hook);
2361 {
2362 FrameScope frame(this,
2363 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
2364 if (expected.is_reg()) {
2365 SmiTag(expected.reg());
2366 Push(expected.reg());
2367 }
2368 if (actual.is_reg()) {
2369 SmiTag(actual.reg());
2370 Push(actual.reg());
2371 }
2372 if (new_target.is_valid()) {
2373 Push(new_target);
2374 }
2375 Push(fun);
2376 Push(fun);
2377 CallRuntime(Runtime::kDebugOnFunctionCall);
2378 Pop(fun);
2379 if (new_target.is_valid()) {
2380 Pop(new_target);
2381 }
2382 if (actual.is_reg()) {
2383 Pop(actual.reg());
2384 SmiUntag(actual.reg());
2385 }
2386 if (expected.is_reg()) {
2387 Pop(expected.reg());
2388 SmiUntag(expected.reg());
2389 }
2390 }
2391 bind(&skip_hook);
2392 }
2393
2394
2395 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
2396 const ParameterCount& expected,
2397 const ParameterCount& actual,
2398 InvokeFlag flag,
2399 const CallWrapper& call_wrapper) {
2400 // You can't call a function without a valid frame.
2401 DCHECK(flag == JUMP_FUNCTION || has_frame());
2402 DCHECK(function.is(x1));
2403 DCHECK_IMPLIES(new_target.is_valid(), new_target.is(x3));
2404
2405 if (call_wrapper.NeedsDebugHookCheck()) {
2406 CheckDebugHook(function, new_target, expected, actual);
2407 }
2408
2409 // Clear the new.target register if not given.
2410 if (!new_target.is_valid()) {
2411 LoadRoot(x3, Heap::kUndefinedValueRootIndex);
2412 }
2413
2414 Label done;
2415 bool definitely_mismatches = false;
2416 InvokePrologue(expected, actual, &done, flag, &definitely_mismatches,
2417 call_wrapper);
2418
2419 // If we are certain that actual != expected, then we know InvokePrologue will
2420 // have handled the call through the argument adaptor mechanism.
2421 // The called function expects the call kind in x5.
2422 if (!definitely_mismatches) {
2423 // We call indirectly through the code field in the function to
2424 // allow recompilation to take effect without changing any of the
2425 // call sites.
2426 Register code = x4;
2427 Ldr(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
2428 if (flag == CALL_FUNCTION) {
2429 call_wrapper.BeforeCall(CallSize(code));
2430 Call(code);
2431 call_wrapper.AfterCall();
2432 } else {
2433 DCHECK(flag == JUMP_FUNCTION);
2434 Jump(code);
2435 }
2436 }
2437
2438 // Continue here if InvokePrologue does handle the invocation due to
2439 // mismatched parameter counts.
2440 Bind(&done);
2441 }
2442
2443
2444 void MacroAssembler::InvokeFunction(Register function,
2445 Register new_target,
2446 const ParameterCount& actual,
2447 InvokeFlag flag,
2448 const CallWrapper& call_wrapper) {
2449 // You can't call a function without a valid frame.
2450 DCHECK(flag == JUMP_FUNCTION || has_frame());
2451
2452 // Contract with called JS functions requires that function is passed in x1.
2453 // (See FullCodeGenerator::Generate().)
2454 DCHECK(function.is(x1));
2455
2456 Register expected_reg = x2;
2457
2458 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2459 // The number of arguments is stored as an int32_t, and -1 is a marker
2460 // (SharedFunctionInfo::kDontAdaptArgumentsSentinel), so we need sign
2461 // extension to correctly handle it.
2462 Ldr(expected_reg, FieldMemOperand(function,
2463 JSFunction::kSharedFunctionInfoOffset));
2464 Ldrsw(expected_reg,
2465 FieldMemOperand(expected_reg,
2466 SharedFunctionInfo::kFormalParameterCountOffset));
2467
2468 ParameterCount expected(expected_reg);
2469 InvokeFunctionCode(function, new_target, expected, actual, flag,
2470 call_wrapper);
2471 }
2472
2473
2474 void MacroAssembler::InvokeFunction(Register function,
2475 const ParameterCount& expected,
2476 const ParameterCount& actual,
2477 InvokeFlag flag,
2478 const CallWrapper& call_wrapper) {
2479 // You can't call a function without a valid frame.
2480 DCHECK(flag == JUMP_FUNCTION || has_frame());
2481
2482 // Contract with called JS functions requires that function is passed in x1.
2483 // (See FullCodeGenerator::Generate().)
2484 DCHECK(function.Is(x1));
2485
2486 // Set up the context.
2487 Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
2488
2489 InvokeFunctionCode(function, no_reg, expected, actual, flag, call_wrapper);
2490 }
2491
2492
2493 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
2494 const ParameterCount& expected,
2495 const ParameterCount& actual,
2496 InvokeFlag flag,
2497 const CallWrapper& call_wrapper) {
2498 // Contract with called JS functions requires that function is passed in x1.
2499 // (See FullCodeGenerator::Generate().)
2500 __ LoadObject(x1, function);
2501 InvokeFunction(x1, expected, actual, flag, call_wrapper);
2502 }
2503
2504
2505 void MacroAssembler::TryConvertDoubleToInt64(Register result,
2506 DoubleRegister double_input,
2507 Label* done) {
2508 // Try to convert with an FPU convert instruction. It's trivial to compute
2509 // the modulo operation on an integer register so we convert to a 64-bit
2510 // integer.
2511 //
2512 // Fcvtzs will saturate to INT64_MIN (0x800...00) or INT64_MAX (0x7ff...ff)
2513 // when the double is out of range. NaNs and infinities will be converted to 0
2514 // (as ECMA-262 requires).
2515 Fcvtzs(result.X(), double_input);
2516
2517   // The values INT64_MIN (0x800...00) and INT64_MAX (0x7ff...ff) are the
2518   // values that Fcvtzs saturates to, so if the result is one of those we
2519   // conservatively assume that saturation occurred and manually handle the conversion.
2520 //
2521 // It is easy to detect INT64_MIN and INT64_MAX because adding or subtracting
2522 // 1 will cause signed overflow.
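  // Illustrative equivalent of the Cmp/Ccmp pair below (not emitted code):
  //
  //   bool maybe_saturated = (result == INT64_MIN) || (result == INT64_MAX);
  //
  // Cmp(result, 1) computes result - 1, overflowing only for INT64_MIN; the
  // Ccmp runs only if that did not overflow (vc) and computes result + 1,
  // overflowing only for INT64_MAX. V is therefore set exactly when the result
  // is a saturation value, and B(vc, done) takes the fast path otherwise.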
2523 Cmp(result.X(), 1);
2524 Ccmp(result.X(), -1, VFlag, vc);
2525
2526 B(vc, done);
2527 }
2528
2529
2530 void MacroAssembler::TruncateDoubleToI(Register result,
2531 DoubleRegister double_input) {
2532 Label done;
2533
2534 // Try to convert the double to an int64. If successful, the bottom 32 bits
2535 // contain our truncated int32 result.
2536 TryConvertDoubleToInt64(result, double_input, &done);
2537
2538 const Register old_stack_pointer = StackPointer();
2539 if (csp.Is(old_stack_pointer)) {
2540 // This currently only happens during compiler-unittest. If it arises
2541 // during regular code generation the DoubleToI stub should be updated to
2542 // cope with csp and have an extra parameter indicating which stack pointer
2543 // it should use.
2544 Push(jssp, xzr); // Push xzr to maintain csp required 16-bytes alignment.
2545 Mov(jssp, csp);
2546 SetStackPointer(jssp);
2547 }
2548
2549   // If we fell through, the inline version didn't succeed, so call the stub instead.
2550 Push(lr, double_input);
2551
2552 DoubleToIStub stub(isolate(),
2553 jssp,
2554 result,
2555 0,
2556 true, // is_truncating
2557 true); // skip_fastpath
2558 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2559
2560 DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
2561 Pop(xzr, lr); // xzr to drop the double input on the stack.
2562
2563 if (csp.Is(old_stack_pointer)) {
2564 Mov(csp, jssp);
2565 SetStackPointer(csp);
2566 AssertStackConsistency();
2567 Pop(xzr, jssp);
2568 }
2569
2570 Bind(&done);
2571 }
2572
2573
2574 void MacroAssembler::TruncateHeapNumberToI(Register result,
2575 Register object) {
2576 Label done;
2577 DCHECK(!result.is(object));
2578 DCHECK(jssp.Is(StackPointer()));
2579
2580 Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2581
2582 // Try to convert the double to an int64. If successful, the bottom 32 bits
2583 // contain our truncated int32 result.
2584 TryConvertDoubleToInt64(result, fp_scratch, &done);
2585
2586   // If we fell through, the inline version didn't succeed, so call the stub instead.
2587 Push(lr);
2588 DoubleToIStub stub(isolate(),
2589 object,
2590 result,
2591 HeapNumber::kValueOffset - kHeapObjectTag,
2592 true, // is_truncating
2593 true); // skip_fastpath
2594 CallStub(&stub); // DoubleToIStub preserves any registers it needs to clobber
2595 Pop(lr);
2596
2597 Bind(&done);
2598 }
2599
2600 void MacroAssembler::StubPrologue(StackFrame::Type type, int frame_slots) {
2601 UseScratchRegisterScope temps(this);
2602 frame_slots -= TypedFrameConstants::kFixedSlotCountAboveFp;
2603 Register temp = temps.AcquireX();
2604 Mov(temp, StackFrame::TypeToMarker(type));
2605 Push(lr, fp);
2606 Mov(fp, StackPointer());
2607 Claim(frame_slots);
2608 str(temp, MemOperand(fp, TypedFrameConstants::kFrameTypeOffset));
2609 }
2610
2611 void MacroAssembler::Prologue(bool code_pre_aging) {
2612 if (code_pre_aging) {
2613 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
2614 __ EmitCodeAgeSequence(stub);
2615 } else {
2616 __ EmitFrameSetupForCodeAgePatching();
2617 }
2618 }
2619
2620 void MacroAssembler::EmitLoadFeedbackVector(Register vector) {
2621 Ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
2622 Ldr(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset));
2623 Ldr(vector, FieldMemOperand(vector, Cell::kValueOffset));
2624 }
2625
2626
2627 void MacroAssembler::EnterFrame(StackFrame::Type type,
2628 bool load_constant_pool_pointer_reg) {
2629 // Out-of-line constant pool not implemented on arm64.
2630 UNREACHABLE();
2631 }
2632
2633
2634 void MacroAssembler::EnterFrame(StackFrame::Type type) {
2635 UseScratchRegisterScope temps(this);
2636 Register type_reg = temps.AcquireX();
2637 Register code_reg = temps.AcquireX();
2638
2639 if (type == StackFrame::INTERNAL) {
2640 DCHECK(jssp.Is(StackPointer()));
2641 Mov(type_reg, StackFrame::TypeToMarker(type));
2642 Push(lr, fp);
2643 Push(type_reg);
2644 Mov(code_reg, Operand(CodeObject()));
2645 Push(code_reg);
2646 Add(fp, jssp, InternalFrameConstants::kFixedFrameSizeFromFp);
2647 // jssp[4] : lr
2648 // jssp[3] : fp
2649 // jssp[1] : type
2650 // jssp[0] : [code object]
2651 } else if (type == StackFrame::WASM_COMPILED) {
2652 DCHECK(csp.Is(StackPointer()));
2653 Mov(type_reg, StackFrame::TypeToMarker(type));
2654 Push(lr, fp);
2655 Mov(fp, csp);
2656 Push(type_reg, xzr);
2657 // csp[3] : lr
2658 // csp[2] : fp
2659 // csp[1] : type
2660 // csp[0] : for alignment
2661 } else {
2662 DCHECK(jssp.Is(StackPointer()));
2663 Mov(type_reg, StackFrame::TypeToMarker(type));
2664 Push(lr, fp);
2665 Push(type_reg);
2666 Add(fp, jssp, TypedFrameConstants::kFixedFrameSizeFromFp);
2667 // jssp[2] : lr
2668 // jssp[1] : fp
2669 // jssp[0] : type
2670 }
2671 }
2672
2673
2674 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
2675 if (type == StackFrame::WASM_COMPILED) {
2676 DCHECK(csp.Is(StackPointer()));
2677 Mov(csp, fp);
2678 AssertStackConsistency();
2679 Pop(fp, lr);
2680 } else {
2681 DCHECK(jssp.Is(StackPointer()));
2682 // Drop the execution stack down to the frame pointer and restore
2683 // the caller frame pointer and return address.
2684 Mov(jssp, fp);
2685 AssertStackConsistency();
2686 Pop(fp, lr);
2687 }
2688 }
2689
2690
2691 void MacroAssembler::ExitFramePreserveFPRegs() {
2692 PushCPURegList(kCallerSavedFP);
2693 }
2694
2695
2696 void MacroAssembler::ExitFrameRestoreFPRegs() {
2697 // Read the registers from the stack without popping them. The stack pointer
2698 // will be reset as part of the unwinding process.
2699 CPURegList saved_fp_regs = kCallerSavedFP;
2700 DCHECK(saved_fp_regs.Count() % 2 == 0);
2701
2702 int offset = ExitFrameConstants::kLastExitFrameField;
2703 while (!saved_fp_regs.IsEmpty()) {
2704 const CPURegister& dst0 = saved_fp_regs.PopHighestIndex();
2705 const CPURegister& dst1 = saved_fp_regs.PopHighestIndex();
2706 offset -= 2 * kDRegSize;
2707 Ldp(dst1, dst0, MemOperand(fp, offset));
2708 }
2709 }
2710
2711 void MacroAssembler::EnterBuiltinFrame(Register context, Register target,
2712 Register argc) {
2713 Push(lr, fp, context, target);
2714 add(fp, jssp, Operand(2 * kPointerSize));
2715 Push(argc);
2716 }
2717
2718 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target,
2719 Register argc) {
2720 Pop(argc);
2721 Pop(target, context, fp, lr);
2722 }
2723
2724 void MacroAssembler::EnterExitFrame(bool save_doubles, const Register& scratch,
2725 int extra_space,
2726 StackFrame::Type frame_type) {
2727 DCHECK(jssp.Is(StackPointer()));
2728 DCHECK(frame_type == StackFrame::EXIT ||
2729 frame_type == StackFrame::BUILTIN_EXIT);
2730
2731 // Set up the new stack frame.
2732 Push(lr, fp);
2733 Mov(fp, StackPointer());
2734 Mov(scratch, StackFrame::TypeToMarker(frame_type));
2735 Push(scratch);
2736 Push(xzr);
2737 Mov(scratch, Operand(CodeObject()));
2738 Push(scratch);
2739 // fp[8]: CallerPC (lr)
2740 // fp -> fp[0]: CallerFP (old fp)
2741 // fp[-8]: STUB marker
2742 // fp[-16]: Space reserved for SPOffset.
2743 // jssp -> fp[-24]: CodeObject()
2744 STATIC_ASSERT((2 * kPointerSize) == ExitFrameConstants::kCallerSPOffset);
2745 STATIC_ASSERT((1 * kPointerSize) == ExitFrameConstants::kCallerPCOffset);
2746 STATIC_ASSERT((0 * kPointerSize) == ExitFrameConstants::kCallerFPOffset);
2747 STATIC_ASSERT((-2 * kPointerSize) == ExitFrameConstants::kSPOffset);
2748 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kCodeOffset);
2749
2750 // Save the frame pointer and context pointer in the top frame.
2751 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2752 isolate())));
2753 Str(fp, MemOperand(scratch));
2754 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2755 isolate())));
2756 Str(cp, MemOperand(scratch));
2757
2758 STATIC_ASSERT((-3 * kPointerSize) == ExitFrameConstants::kLastExitFrameField);
2759 if (save_doubles) {
2760 ExitFramePreserveFPRegs();
2761 }
2762
2763 // Reserve space for the return address and for user requested memory.
2764 // We do this before aligning to make sure that we end up correctly
2765 // aligned with the minimum of wasted space.
2766 Claim(extra_space + 1, kXRegSize);
2767 // fp[8]: CallerPC (lr)
2768 // fp -> fp[0]: CallerFP (old fp)
2769 // fp[-8]: STUB marker
2770 // fp[-16]: Space reserved for SPOffset.
2771 // fp[-24]: CodeObject()
2772 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
2773 // jssp[8]: Extra space reserved for caller (if extra_space != 0).
2774 // jssp -> jssp[0]: Space reserved for the return address.
2775
2776 // Align and synchronize the system stack pointer with jssp.
2777 AlignAndSetCSPForFrame();
2778 DCHECK(csp.Is(StackPointer()));
2779
2780 // fp[8]: CallerPC (lr)
2781 // fp -> fp[0]: CallerFP (old fp)
2782 // fp[-8]: STUB marker
2783 // fp[-16]: Space reserved for SPOffset.
2784 // fp[-24]: CodeObject()
2785 // fp[-24 - fp_size]: Saved doubles (if save_doubles is true).
2786 // csp[8]: Memory reserved for the caller if extra_space != 0.
2787 // Alignment padding, if necessary.
2788 // csp -> csp[0]: Space reserved for the return address.
2789
2790 // ExitFrame::GetStateForFramePointer expects to find the return address at
2791 // the memory address immediately below the pointer stored in SPOffset.
2792 // It is not safe to derive much else from SPOffset, because the size of the
2793 // padding can vary.
2794 Add(scratch, csp, kXRegSize);
2795 Str(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
2796 }
2797
2798
2799 // Leave the current exit frame.
2800 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
2801 const Register& scratch,
2802 bool restore_context) {
2803 DCHECK(csp.Is(StackPointer()));
2804
2805 if (restore_doubles) {
2806 ExitFrameRestoreFPRegs();
2807 }
2808
2809 // Restore the context pointer from the top frame.
2810 if (restore_context) {
2811 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2812 isolate())));
2813 Ldr(cp, MemOperand(scratch));
2814 }
2815
2816 if (emit_debug_code()) {
2817 // Also emit debug code to clear the cp in the top frame.
2818 Mov(scratch, Operand(ExternalReference(Isolate::kContextAddress,
2819 isolate())));
2820 Str(xzr, MemOperand(scratch));
2821 }
2822 // Clear the frame pointer from the top frame.
2823 Mov(scratch, Operand(ExternalReference(Isolate::kCEntryFPAddress,
2824 isolate())));
2825 Str(xzr, MemOperand(scratch));
2826
2827 // Pop the exit frame.
2828 // fp[8]: CallerPC (lr)
2829 // fp -> fp[0]: CallerFP (old fp)
2830 // fp[...]: The rest of the frame.
2831 Mov(jssp, fp);
2832 SetStackPointer(jssp);
2833 AssertStackConsistency();
2834 Pop(fp, lr);
2835 }
2836
2837
2838 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2839 Register scratch1, Register scratch2) {
2840 if (FLAG_native_code_counters && counter->Enabled()) {
2841 Mov(scratch1, value);
2842 Mov(scratch2, ExternalReference(counter));
2843 Str(scratch1.W(), MemOperand(scratch2));
2844 }
2845 }
2846
2847
2848 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2849 Register scratch1, Register scratch2) {
2850 DCHECK(value != 0);
2851 if (FLAG_native_code_counters && counter->Enabled()) {
2852 Mov(scratch2, ExternalReference(counter));
2853 Ldr(scratch1.W(), MemOperand(scratch2));
2854 Add(scratch1.W(), scratch1.W(), value);
2855 Str(scratch1.W(), MemOperand(scratch2));
2856 }
2857 }
2858
2859
2860 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2861 Register scratch1, Register scratch2) {
2862 IncrementCounter(counter, -value, scratch1, scratch2);
2863 }
2864
2865
2866 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2867 if (context_chain_length > 0) {
2868 // Move up the chain of contexts to the context containing the slot.
2869 Ldr(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2870 for (int i = 1; i < context_chain_length; i++) {
2871 Ldr(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2872 }
2873 } else {
2874 // Slot is in the current function context. Move it into the
2875 // destination register in case we store into it (the write barrier
2876 // cannot be allowed to destroy the context in cp).
2877 Mov(dst, cp);
2878 }
2879 }
2880
2881 void MacroAssembler::MaybeDropFrames() {
2882 // Check whether we need to drop frames to restart a function on the stack.
2883 ExternalReference restart_fp =
2884 ExternalReference::debug_restart_fp_address(isolate());
2885 Mov(x1, Operand(restart_fp));
2886 Ldr(x1, MemOperand(x1));
2887 Tst(x1, x1);
2888 Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET,
2889 ne);
2890 }
2891
2892 void MacroAssembler::PushStackHandler() {
2893 DCHECK(jssp.Is(StackPointer()));
2894 // Adjust this code if the asserts don't hold.
2895 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize);
2896 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2897
2898 // For the JSEntry handler, we must preserve the live registers x0-x4.
2899 // (See JSEntryStub::GenerateBody().)
2900
2901 // Link the current handler as the next handler.
2902 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
2903 Ldr(x10, MemOperand(x11));
2904 Push(x10);
2905
2906 // Set this new handler as the current one.
2907 Str(jssp, MemOperand(x11));
2908 }
2909
2910
2911 void MacroAssembler::PopStackHandler() {
2912 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2913 Pop(x10);
2914 Mov(x11, ExternalReference(Isolate::kHandlerAddress, isolate()));
2915 Drop(StackHandlerConstants::kSize - kXRegSize, kByteSizeInBytes);
2916 Str(x10, MemOperand(x11));
2917 }
2918
2919
2920 void MacroAssembler::Allocate(int object_size,
2921 Register result,
2922 Register scratch1,
2923 Register scratch2,
2924 Label* gc_required,
2925 AllocationFlags flags) {
2926 DCHECK(object_size <= kMaxRegularHeapObjectSize);
2927 DCHECK((flags & ALLOCATION_FOLDED) == 0);
2928 if (!FLAG_inline_new) {
2929 if (emit_debug_code()) {
2930 // Trash the registers to simulate an allocation failure.
2931 // We apply salt to the original zap value to easily spot the values.
2932 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
2933 Mov(scratch1, (kDebugZapValue & ~0xffL) | 0x21L);
2934 Mov(scratch2, (kDebugZapValue & ~0xffL) | 0x21L);
2935 }
2936 B(gc_required);
2937 return;
2938 }
2939
2940 UseScratchRegisterScope temps(this);
2941 Register scratch3 = temps.AcquireX();
2942
2943 DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
2944 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
2945
2946 // Make object size into bytes.
2947 if ((flags & SIZE_IN_WORDS) != 0) {
2948 object_size *= kPointerSize;
2949 }
2950 DCHECK(0 == (object_size & kObjectAlignmentMask));
2951
2952 // Check relative positions of allocation top and limit addresses.
2953 // The values must be adjacent in memory to allow the use of LDP.
2954 ExternalReference heap_allocation_top =
2955 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2956 ExternalReference heap_allocation_limit =
2957 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2958 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
2959 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
2960 DCHECK((limit - top) == kPointerSize);
2961
2962 // Set up allocation top address and allocation limit registers.
2963 Register top_address = scratch1;
2964 Register alloc_limit = scratch2;
2965 Register result_end = scratch3;
2966 Mov(top_address, Operand(heap_allocation_top));
2967
2968 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2969 // Load allocation top into result and allocation limit into alloc_limit.
2970 Ldp(result, alloc_limit, MemOperand(top_address));
2971 } else {
2972 if (emit_debug_code()) {
2973 // Assert that result actually contains top on entry.
2974 Ldr(alloc_limit, MemOperand(top_address));
2975 Cmp(result, alloc_limit);
2976 Check(eq, kUnexpectedAllocationTop);
2977 }
2978 // Load allocation limit. Result already contains allocation top.
2979 Ldr(alloc_limit, MemOperand(top_address, limit - top));
2980 }
2981
2982 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
2983 // the same alignment on ARM64.
2984 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
2985
2986 // Calculate new top and bail out if new space is exhausted.
2987 Adds(result_end, result, object_size);
2988 Ccmp(result_end, alloc_limit, NoFlag, cc);
2989 B(hi, gc_required);
2990
2991 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
2992 // The top pointer is not updated for allocation folding dominators.
2993 Str(result_end, MemOperand(top_address));
2994 }
2995
2996 // Tag the object.
2997 ObjectTag(result, result);
2998 }
2999
3000
3001 void MacroAssembler::Allocate(Register object_size, Register result,
3002 Register result_end, Register scratch,
3003 Label* gc_required, AllocationFlags flags) {
3004 if (!FLAG_inline_new) {
3005 if (emit_debug_code()) {
3006 // Trash the registers to simulate an allocation failure.
3007 // We apply salt to the original zap value to easily spot the values.
3008 Mov(result, (kDebugZapValue & ~0xffL) | 0x11L);
3009 Mov(scratch, (kDebugZapValue & ~0xffL) | 0x21L);
3010 Mov(result_end, (kDebugZapValue & ~0xffL) | 0x21L);
3011 }
3012 B(gc_required);
3013 return;
3014 }
3015
3016 UseScratchRegisterScope temps(this);
3017 Register scratch2 = temps.AcquireX();
3018
3019 // |object_size| and |result_end| may overlap, other registers must not.
3020 DCHECK(!AreAliased(object_size, result, scratch, scratch2));
3021 DCHECK(!AreAliased(result_end, result, scratch, scratch2));
3022 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3023 result_end.Is64Bits());
3024
3025 // Check relative positions of allocation top and limit addresses.
3026 // The values must be adjacent in memory to allow the use of LDP.
3027 ExternalReference heap_allocation_top =
3028 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3029 ExternalReference heap_allocation_limit =
3030 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
3031 intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
3032 intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
3033 DCHECK((limit - top) == kPointerSize);
3034
3035 // Set up allocation top address and allocation limit registers.
3036 Register top_address = scratch;
3037 Register alloc_limit = scratch2;
3038 Mov(top_address, heap_allocation_top);
3039
3040 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3041 // Load allocation top into result and allocation limit into alloc_limit.
3042 Ldp(result, alloc_limit, MemOperand(top_address));
3043 } else {
3044 if (emit_debug_code()) {
3045 // Assert that result actually contains top on entry.
3046 Ldr(alloc_limit, MemOperand(top_address));
3047 Cmp(result, alloc_limit);
3048 Check(eq, kUnexpectedAllocationTop);
3049 }
3050 // Load allocation limit. Result already contains allocation top.
3051 Ldr(alloc_limit, MemOperand(top_address, limit - top));
3052 }
3053
3054 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3055 // the same alignment on ARM64.
3056 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3057
3058 // Calculate new top and bail out if new space is exhausted
3059 if ((flags & SIZE_IN_WORDS) != 0) {
3060 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
3061 } else {
3062 Adds(result_end, result, object_size);
3063 }
3064
3065 if (emit_debug_code()) {
3066 Tst(result_end, kObjectAlignmentMask);
3067 Check(eq, kUnalignedAllocationInNewSpace);
3068 }
3069
3070 Ccmp(result_end, alloc_limit, NoFlag, cc);
3071 B(hi, gc_required);
3072
3073 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
3074 // The top pointer is not updated for allocation folding dominators.
3075 Str(result_end, MemOperand(top_address));
3076 }
3077
3078 // Tag the object.
3079 ObjectTag(result, result);
3080 }
3081
3082 void MacroAssembler::FastAllocate(int object_size, Register result,
3083 Register scratch1, Register scratch2,
3084 AllocationFlags flags) {
3085 DCHECK(object_size <= kMaxRegularHeapObjectSize);
3086
3087 DCHECK(!AreAliased(result, scratch1, scratch2));
3088 DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
3089
3090 // Make object size into bytes.
3091 if ((flags & SIZE_IN_WORDS) != 0) {
3092 object_size *= kPointerSize;
3093 }
3094 DCHECK(0 == (object_size & kObjectAlignmentMask));
3095
3096 ExternalReference heap_allocation_top =
3097 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3098
3099 // Set up allocation top address and allocation limit registers.
3100 Register top_address = scratch1;
3101 Register result_end = scratch2;
3102 Mov(top_address, Operand(heap_allocation_top));
3103 Ldr(result, MemOperand(top_address));
3104
3105 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3106 // the same alignment on ARM64.
3107 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3108
3109 // Calculate new top and write it back.
3110 Adds(result_end, result, object_size);
3111 Str(result_end, MemOperand(top_address));
3112
3113 ObjectTag(result, result);
3114 }
3115
FastAllocate(Register object_size,Register result,Register result_end,Register scratch,AllocationFlags flags)3116 void MacroAssembler::FastAllocate(Register object_size, Register result,
3117 Register result_end, Register scratch,
3118 AllocationFlags flags) {
3119 // |object_size| and |result_end| may overlap; other registers must not.
3120 DCHECK(!AreAliased(object_size, result, scratch));
3121 DCHECK(!AreAliased(result_end, result, scratch));
3122 DCHECK(object_size.Is64Bits() && result.Is64Bits() && scratch.Is64Bits() &&
3123 result_end.Is64Bits());
3124
3125 ExternalReference heap_allocation_top =
3126 AllocationUtils::GetAllocationTopReference(isolate(), flags);
3127
3128 // Set up allocation top address and allocation limit registers.
3129 Register top_address = scratch;
3130 Mov(top_address, heap_allocation_top);
3131 Ldr(result, MemOperand(top_address));
3132
3133 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have
3134 // the same alignment on ARM64.
3135 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
3136
3137 // Calculate new top and write it back.
3138 if ((flags & SIZE_IN_WORDS) != 0) {
3139 Adds(result_end, result, Operand(object_size, LSL, kPointerSizeLog2));
3140 } else {
3141 Adds(result_end, result, object_size);
3142 }
3143 Str(result_end, MemOperand(top_address));
3144
3145 if (emit_debug_code()) {
3146 Tst(result_end, kObjectAlignmentMask);
3147 Check(eq, kUnalignedAllocationInNewSpace);
3148 }
3149
3150 ObjectTag(result, result);
3151 }
3152
3153 // Allocates a heap number or jumps to the gc_required label if the young space
3154 // is full and a scavenge is needed.
AllocateHeapNumber(Register result,Label * gc_required,Register scratch1,Register scratch2,CPURegister value,CPURegister heap_number_map,MutableMode mode)3155 void MacroAssembler::AllocateHeapNumber(Register result,
3156 Label* gc_required,
3157 Register scratch1,
3158 Register scratch2,
3159 CPURegister value,
3160 CPURegister heap_number_map,
3161 MutableMode mode) {
3162 DCHECK(!value.IsValid() || value.Is64Bits());
3163 UseScratchRegisterScope temps(this);
3164
3165 // Allocate an object in the heap for the heap number and tag it as a heap
3166 // object.
3167 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3168 NO_ALLOCATION_FLAGS);
3169
3170 Heap::RootListIndex map_index = mode == MUTABLE
3171 ? Heap::kMutableHeapNumberMapRootIndex
3172 : Heap::kHeapNumberMapRootIndex;
3173
3174 // Prepare the heap number map.
3175 if (!heap_number_map.IsValid()) {
3176 // If we have a valid value register, use the same type of register to store
3177 // the map so we can use STP to store both in one instruction.
3178 if (value.IsValid() && value.IsFPRegister()) {
3179 heap_number_map = temps.AcquireD();
3180 } else {
3181 heap_number_map = scratch1;
3182 }
3183 LoadRoot(heap_number_map, map_index);
3184 }
3185 if (emit_debug_code()) {
3186 Register map;
3187 if (heap_number_map.IsFPRegister()) {
3188 map = scratch1;
3189 Fmov(map, DoubleRegister(heap_number_map));
3190 } else {
3191 map = Register(heap_number_map);
3192 }
3193 AssertRegisterIsRoot(map, map_index);
3194 }
3195
3196 // Store the heap number map and the value in the allocated object.
3197 if (value.IsSameSizeAndType(heap_number_map)) {
3198 STATIC_ASSERT(HeapObject::kMapOffset + kPointerSize ==
3199 HeapNumber::kValueOffset);
3200 Stp(heap_number_map, value,
3201 FieldMemOperand(result, HeapObject::kMapOffset));
3202 } else {
3203 Str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3204 if (value.IsValid()) {
3205 Str(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3206 }
3207 }
3208 }
3209
3210
JumpIfObjectType(Register object,Register map,Register type_reg,InstanceType type,Label * if_cond_pass,Condition cond)3211 void MacroAssembler::JumpIfObjectType(Register object,
3212 Register map,
3213 Register type_reg,
3214 InstanceType type,
3215 Label* if_cond_pass,
3216 Condition cond) {
3217 CompareObjectType(object, map, type_reg, type);
3218 B(cond, if_cond_pass);
3219 }
3220
3221
AllocateJSValue(Register result,Register constructor,Register value,Register scratch1,Register scratch2,Label * gc_required)3222 void MacroAssembler::AllocateJSValue(Register result, Register constructor,
3223 Register value, Register scratch1,
3224 Register scratch2, Label* gc_required) {
3225 DCHECK(!result.is(constructor));
3226 DCHECK(!result.is(scratch1));
3227 DCHECK(!result.is(scratch2));
3228 DCHECK(!result.is(value));
3229
3230 // Allocate JSValue in new space.
3231 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
3232 NO_ALLOCATION_FLAGS);
3233
3234 // Initialize the JSValue.
3235 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
3236 Str(scratch1, FieldMemOperand(result, HeapObject::kMapOffset));
3237 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex);
3238 Str(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset));
3239 Str(scratch1, FieldMemOperand(result, JSObject::kElementsOffset));
3240 Str(value, FieldMemOperand(result, JSValue::kValueOffset));
3241 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
3242 }
3243
3244
JumpIfNotObjectType(Register object,Register map,Register type_reg,InstanceType type,Label * if_not_object)3245 void MacroAssembler::JumpIfNotObjectType(Register object,
3246 Register map,
3247 Register type_reg,
3248 InstanceType type,
3249 Label* if_not_object) {
3250 JumpIfObjectType(object, map, type_reg, type, if_not_object, ne);
3251 }
3252
3253
3254 // Sets condition flags based on comparison, and returns type in type_reg.
CompareObjectType(Register object,Register map,Register type_reg,InstanceType type)3255 void MacroAssembler::CompareObjectType(Register object,
3256 Register map,
3257 Register type_reg,
3258 InstanceType type) {
3259 Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset));
3260 CompareInstanceType(map, type_reg, type);
3261 }
3262
3263
3264 // Sets condition flags based on comparison, and returns type in type_reg.
CompareInstanceType(Register map,Register type_reg,InstanceType type)3265 void MacroAssembler::CompareInstanceType(Register map,
3266 Register type_reg,
3267 InstanceType type) {
3268 Ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3269 Cmp(type_reg, type);
3270 }
3271
3272
CompareObjectMap(Register obj,Heap::RootListIndex index)3273 void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
3274 UseScratchRegisterScope temps(this);
3275 Register obj_map = temps.AcquireX();
3276 Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
3277 CompareRoot(obj_map, index);
3278 }
3279
3280
CompareObjectMap(Register obj,Register scratch,Handle<Map> map)3281 void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
3282 Handle<Map> map) {
3283 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3284 CompareMap(scratch, map);
3285 }
3286
3287
CompareMap(Register obj_map,Handle<Map> map)3288 void MacroAssembler::CompareMap(Register obj_map,
3289 Handle<Map> map) {
3290 Cmp(obj_map, Operand(map));
3291 }
3292
3293
CheckMap(Register obj,Register scratch,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)3294 void MacroAssembler::CheckMap(Register obj,
3295 Register scratch,
3296 Handle<Map> map,
3297 Label* fail,
3298 SmiCheckType smi_check_type) {
3299 if (smi_check_type == DO_SMI_CHECK) {
3300 JumpIfSmi(obj, fail);
3301 }
3302
3303 CompareObjectMap(obj, scratch, map);
3304 B(ne, fail);
3305 }
3306
3307
CheckMap(Register obj,Register scratch,Heap::RootListIndex index,Label * fail,SmiCheckType smi_check_type)3308 void MacroAssembler::CheckMap(Register obj,
3309 Register scratch,
3310 Heap::RootListIndex index,
3311 Label* fail,
3312 SmiCheckType smi_check_type) {
3313 if (smi_check_type == DO_SMI_CHECK) {
3314 JumpIfSmi(obj, fail);
3315 }
3316 Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3317 JumpIfNotRoot(scratch, index, fail);
3318 }
3319
3320
CheckMap(Register obj_map,Handle<Map> map,Label * fail,SmiCheckType smi_check_type)3321 void MacroAssembler::CheckMap(Register obj_map,
3322 Handle<Map> map,
3323 Label* fail,
3324 SmiCheckType smi_check_type) {
3325 if (smi_check_type == DO_SMI_CHECK) {
3326 JumpIfSmi(obj_map, fail);
3327 }
3328
3329 CompareMap(obj_map, map);
3330 B(ne, fail);
3331 }
3332
3333
DispatchWeakMap(Register obj,Register scratch1,Register scratch2,Handle<WeakCell> cell,Handle<Code> success,SmiCheckType smi_check_type)3334 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
3335 Register scratch2, Handle<WeakCell> cell,
3336 Handle<Code> success,
3337 SmiCheckType smi_check_type) {
3338 Label fail;
3339 if (smi_check_type == DO_SMI_CHECK) {
3340 JumpIfSmi(obj, &fail);
3341 }
3342 Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
3343 CmpWeakValue(scratch1, cell, scratch2);
3344 B(ne, &fail);
3345 Jump(success, RelocInfo::CODE_TARGET);
3346 Bind(&fail);
3347 }
3348
3349
CmpWeakValue(Register value,Handle<WeakCell> cell,Register scratch)3350 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
3351 Register scratch) {
3352 Mov(scratch, Operand(cell));
3353 Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
3354 Cmp(value, scratch);
3355 }
3356
3357
GetWeakValue(Register value,Handle<WeakCell> cell)3358 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
3359 Mov(value, Operand(cell));
3360 Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
3361 }
3362
3363
LoadWeakValue(Register value,Handle<WeakCell> cell,Label * miss)3364 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
3365 Label* miss) {
3366 GetWeakValue(value, cell);
3367 JumpIfSmi(value, miss);
3368 }
3369
3370
TestMapBitfield(Register object,uint64_t mask)3371 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
3372 UseScratchRegisterScope temps(this);
3373 Register temp = temps.AcquireX();
3374 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3375 Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
3376 Tst(temp, mask);
3377 }
3378
3379
LoadElementsKindFromMap(Register result,Register map)3380 void MacroAssembler::LoadElementsKindFromMap(Register result, Register map) {
3381 // Load the map's "bit field 2".
3382 __ Ldrb(result, FieldMemOperand(map, Map::kBitField2Offset));
3383 // Retrieve elements_kind from bit field 2.
3384 DecodeField<Map::ElementsKindBits>(result);
3385 }
3386
3387
GetMapConstructor(Register result,Register map,Register temp,Register temp2)3388 void MacroAssembler::GetMapConstructor(Register result, Register map,
3389 Register temp, Register temp2) {
3390 Label done, loop;
3391 Ldr(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset));
3392 Bind(&loop);
3393 JumpIfSmi(result, &done);
3394 CompareObjectType(result, temp, temp2, MAP_TYPE);
3395 B(ne, &done);
3396 Ldr(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset));
3397 B(&loop);
3398 Bind(&done);
3399 }
3400
PushRoot(Heap::RootListIndex index)3401 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
3402 UseScratchRegisterScope temps(this);
3403 Register temp = temps.AcquireX();
3404 LoadRoot(temp, index);
3405 Push(temp);
3406 }
3407
3408
CompareRoot(const Register & obj,Heap::RootListIndex index)3409 void MacroAssembler::CompareRoot(const Register& obj,
3410 Heap::RootListIndex index) {
3411 UseScratchRegisterScope temps(this);
3412 Register temp = temps.AcquireX();
3413 DCHECK(!AreAliased(obj, temp));
3414 LoadRoot(temp, index);
3415 Cmp(obj, temp);
3416 }
3417
3418
JumpIfRoot(const Register & obj,Heap::RootListIndex index,Label * if_equal)3419 void MacroAssembler::JumpIfRoot(const Register& obj,
3420 Heap::RootListIndex index,
3421 Label* if_equal) {
3422 CompareRoot(obj, index);
3423 B(eq, if_equal);
3424 }
3425
3426
JumpIfNotRoot(const Register & obj,Heap::RootListIndex index,Label * if_not_equal)3427 void MacroAssembler::JumpIfNotRoot(const Register& obj,
3428 Heap::RootListIndex index,
3429 Label* if_not_equal) {
3430 CompareRoot(obj, index);
3431 B(ne, if_not_equal);
3432 }
3433
3434
CompareAndSplit(const Register & lhs,const Operand & rhs,Condition cond,Label * if_true,Label * if_false,Label * fall_through)3435 void MacroAssembler::CompareAndSplit(const Register& lhs,
3436 const Operand& rhs,
3437 Condition cond,
3438 Label* if_true,
3439 Label* if_false,
3440 Label* fall_through) {
3441 if ((if_true == if_false) && (if_false == fall_through)) {
3442 // Fall through.
3443 } else if (if_true == if_false) {
3444 B(if_true);
3445 } else if (if_false == fall_through) {
3446 CompareAndBranch(lhs, rhs, cond, if_true);
3447 } else if (if_true == fall_through) {
3448 CompareAndBranch(lhs, rhs, NegateCondition(cond), if_false);
3449 } else {
3450 CompareAndBranch(lhs, rhs, cond, if_true);
3451 B(if_false);
3452 }
3453 }
3454
3455
TestAndSplit(const Register & reg,uint64_t bit_pattern,Label * if_all_clear,Label * if_any_set,Label * fall_through)3456 void MacroAssembler::TestAndSplit(const Register& reg,
3457 uint64_t bit_pattern,
3458 Label* if_all_clear,
3459 Label* if_any_set,
3460 Label* fall_through) {
3461 if ((if_all_clear == if_any_set) && (if_any_set == fall_through)) {
3462 // Fall through.
3463 } else if (if_all_clear == if_any_set) {
3464 B(if_all_clear);
3465 } else if (if_all_clear == fall_through) {
3466 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3467 } else if (if_any_set == fall_through) {
3468 TestAndBranchIfAllClear(reg, bit_pattern, if_all_clear);
3469 } else {
3470 TestAndBranchIfAnySet(reg, bit_pattern, if_any_set);
3471 B(if_all_clear);
3472 }
3473 }
3474
AllowThisStubCall(CodeStub * stub)3475 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
3476 return has_frame_ || !stub->SometimesSetsUpAFrame();
3477 }
3478
EmitSeqStringSetCharCheck(Register string,Register index,SeqStringSetCharCheckIndexType index_type,Register scratch,uint32_t encoding_mask)3479 void MacroAssembler::EmitSeqStringSetCharCheck(
3480 Register string,
3481 Register index,
3482 SeqStringSetCharCheckIndexType index_type,
3483 Register scratch,
3484 uint32_t encoding_mask) {
3485 DCHECK(!AreAliased(string, index, scratch));
3486
3487 if (index_type == kIndexIsSmi) {
3488 AssertSmi(index);
3489 }
3490
3491 // Check that string is an object.
3492 AssertNotSmi(string, kNonObject);
3493
3494 // Check that string has an appropriate map.
3495 Ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
3496 Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3497
3498 And(scratch, scratch, kStringRepresentationMask | kStringEncodingMask);
3499 Cmp(scratch, encoding_mask);
3500 Check(eq, kUnexpectedStringType);
3501
3502 Ldr(scratch, FieldMemOperand(string, String::kLengthOffset));
3503 Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
3504 Check(lt, kIndexIsTooLarge);
3505
3506 DCHECK_EQ(static_cast<Smi*>(0), Smi::kZero);
3507 Cmp(index, 0);
3508 Check(ge, kIndexIsNegative);
3509 }
3510
3511
3512 // Compute the hash code from the untagged key. This must be kept in sync with
3513 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
3514 // code-stubs-hydrogen.cc.
GetNumberHash(Register key,Register scratch)3515 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
3516 DCHECK(!AreAliased(key, scratch));
3517
3518 // Xor original key with a seed.
3519 LoadRoot(scratch, Heap::kHashSeedRootIndex);
3520 Eor(key, key, Operand::UntagSmi(scratch));
3521
3522 // The algorithm uses 32-bit integer values.
3523 key = key.W();
3524 scratch = scratch.W();
3525
3526 // Compute the hash code from the untagged key. This must be kept in sync
3527 // with ComputeIntegerHash in utils.h.
3528 //
3529 // hash = ~hash + (hash << 15);
3530 Mvn(scratch, key);
3531 Add(key, scratch, Operand(key, LSL, 15));
3532 // hash = hash ^ (hash >> 12);
3533 Eor(key, key, Operand(key, LSR, 12));
3534 // hash = hash + (hash << 2);
3535 Add(key, key, Operand(key, LSL, 2));
3536 // hash = hash ^ (hash >> 4);
3537 Eor(key, key, Operand(key, LSR, 4));
3538 // hash = hash * 2057;
3539 Mov(scratch, Operand(key, LSL, 11));
3540 Add(key, key, Operand(key, LSL, 3));
3541 Add(key, key, scratch);
3542 // hash = hash ^ (hash >> 16);
3543 Eor(key, key, Operand(key, LSR, 16));
3544 Bic(key, key, Operand(0xc0000000u));
3545 }
3546
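// Illustrative sketch (not part of this file's build): the scalar computation
// that the instruction sequence in GetNumberHash implements, mirroring
// ComputeIntegerHash. The function name and signature here are ad hoc.
static inline uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash + (hash << 3) + (hash << 11);  // hash * 2057.
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffffu;  // The Bic above clears the top two bits.
}
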
RecordWriteCodeEntryField(Register js_function,Register code_entry,Register scratch)3547 void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
3548 Register code_entry,
3549 Register scratch) {
3550 const int offset = JSFunction::kCodeEntryOffset;
3551
3552 // Since a code entry (value) is always in old space, we don't need to update
3553 // the remembered set. If incremental marking is off, there is nothing for us to
3554 // do.
3555 if (!FLAG_incremental_marking) return;
3556
3557 DCHECK(js_function.is(x1));
3558 DCHECK(code_entry.is(x7));
3559 DCHECK(scratch.is(x5));
3560 AssertNotSmi(js_function);
3561
3562 if (emit_debug_code()) {
3563 UseScratchRegisterScope temps(this);
3564 Register temp = temps.AcquireX();
3565 Add(scratch, js_function, offset - kHeapObjectTag);
3566 Ldr(temp, MemOperand(scratch));
3567 Cmp(temp, code_entry);
3568 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3569 }
3570
3571 // First, check if a write barrier is even needed. The tests below
3572 // catch stores of Smis and stores into young gen.
3573 Label done;
3574
3575 CheckPageFlagClear(code_entry, scratch,
3576 MemoryChunk::kPointersToHereAreInterestingMask, &done);
3577 CheckPageFlagClear(js_function, scratch,
3578 MemoryChunk::kPointersFromHereAreInterestingMask, &done);
3579
3580 const Register dst = scratch;
3581 Add(dst, js_function, offset - kHeapObjectTag);
3582
3583 // Save caller-saved registers. Both input registers (x1 and x7) are caller
3584 // saved, so there is no need to push them.
3585 PushCPURegList(kCallerSaved);
3586
3587 int argument_count = 3;
3588
3589 Mov(x0, js_function);
3590 Mov(x1, dst);
3591 Mov(x2, ExternalReference::isolate_address(isolate()));
3592
3593 {
3594 AllowExternalCallThatCantCauseGC scope(this);
3595 CallCFunction(
3596 ExternalReference::incremental_marking_record_write_code_entry_function(
3597 isolate()),
3598 argument_count);
3599 }
3600
3601 // Restore caller-saved registers.
3602 PopCPURegList(kCallerSaved);
3603
3604 Bind(&done);
3605 }
3606
RememberedSetHelper(Register object,Register address,Register scratch1,SaveFPRegsMode fp_mode,RememberedSetFinalAction and_then)3607 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
3608 Register address,
3609 Register scratch1,
3610 SaveFPRegsMode fp_mode,
3611 RememberedSetFinalAction and_then) {
3612 DCHECK(!AreAliased(object, address, scratch1));
3613 Label done, store_buffer_overflow;
3614 if (emit_debug_code()) {
3615 Label ok;
3616 JumpIfNotInNewSpace(object, &ok);
3617 Abort(kRememberedSetPointerInNewSpace);
3618 bind(&ok);
3619 }
3620 UseScratchRegisterScope temps(this);
3621 Register scratch2 = temps.AcquireX();
3622
3623 // Load store buffer top.
3624 Mov(scratch2, ExternalReference::store_buffer_top(isolate()));
3625 Ldr(scratch1, MemOperand(scratch2));
3626 // Store pointer to buffer and increment buffer top.
3627 Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
3628 // Write back new top of buffer.
3629 Str(scratch1, MemOperand(scratch2));
3630 // Call stub on end of buffer.
3631 // Check for end of buffer.
3632 Tst(scratch1, StoreBuffer::kStoreBufferMask);
3633 if (and_then == kFallThroughAtEnd) {
3634 B(ne, &done);
3635 } else {
3636 DCHECK(and_then == kReturnAtEnd);
3637 B(eq, &store_buffer_overflow);
3638 Ret();
3639 }
3640
3641 Bind(&store_buffer_overflow);
3642 Push(lr);
3643 StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
3644 CallStub(&store_buffer_overflow_stub);
3645 Pop(lr);
3646
3647 Bind(&done);
3648 if (and_then == kReturnAtEnd) {
3649 Ret();
3650 }
3651 }
3652
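// Illustrative sketch (not part of this file's build): a rough model of the
// store buffer append that RememberedSetHelper above performs. Names are ad
// hoc; the real top pointer is reached through
// ExternalReference::store_buffer_top(). Returns true while there is still
// room, mirroring the mask test that selects the overflow stub path.
static inline bool StoreBufferAppendSketch(uintptr_t** store_buffer_top,
                                           uintptr_t slot_address,
                                           uintptr_t store_buffer_mask) {
  uintptr_t* top = *store_buffer_top;
  *top++ = slot_address;    // Record the address of the updated slot.
  *store_buffer_top = top;  // Write back the new top of the buffer.
  // A zero masked value means the buffer is full and the overflow stub must
  // run; otherwise the caller can fall through or return.
  return (reinterpret_cast<uintptr_t>(top) & store_buffer_mask) != 0;
}
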
3653
PopSafepointRegisters()3654 void MacroAssembler::PopSafepointRegisters() {
3655 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3656 PopXRegList(kSafepointSavedRegisters);
3657 Drop(num_unsaved);
3658 }
3659
3660
PushSafepointRegisters()3661 void MacroAssembler::PushSafepointRegisters() {
3662 // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
3663 // adjust the stack for unsaved registers.
3664 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
3665 DCHECK(num_unsaved >= 0);
3666 Claim(num_unsaved);
3667 PushXRegList(kSafepointSavedRegisters);
3668 }
3669
3670
PushSafepointRegistersAndDoubles()3671 void MacroAssembler::PushSafepointRegistersAndDoubles() {
3672 PushSafepointRegisters();
3673 PushCPURegList(CPURegList(
3674 CPURegister::kFPRegister, kDRegSizeInBits,
3675 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
3676 }
3677
3678
PopSafepointRegistersAndDoubles()3679 void MacroAssembler::PopSafepointRegistersAndDoubles() {
3680 PopCPURegList(CPURegList(
3681 CPURegister::kFPRegister, kDRegSizeInBits,
3682 RegisterConfiguration::Crankshaft()->allocatable_double_codes_mask()));
3683 PopSafepointRegisters();
3684 }
3685
3686
SafepointRegisterStackIndex(int reg_code)3687 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
3688 // Make sure the safepoint registers list is what we expect.
3689 DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
3690
3691 // Safepoint registers are stored contiguously on the stack, but not all the
3692 // registers are saved. The following registers are excluded:
3693 // - x16 and x17 (ip0 and ip1) because they shouldn't be preserved outside of
3694 // the macro assembler.
3695 // - x28 (jssp) because JS stack pointer doesn't need to be included in
3696 // safepoint registers.
3697 // - x31 (csp) because the system stack pointer doesn't need to be included
3698 // in safepoint registers.
3699 //
3700 // This function implements the mapping of register code to index into the
3701 // safepoint register slots.
3702 if ((reg_code >= 0) && (reg_code <= 15)) {
3703 return reg_code;
3704 } else if ((reg_code >= 18) && (reg_code <= 27)) {
3705 // Skip ip0 and ip1.
3706 return reg_code - 2;
3707 } else if ((reg_code == 29) || (reg_code == 30)) {
3708 // Also skip jssp.
3709 return reg_code - 3;
3710 } else {
3711 // This register has no safepoint register slot.
3712 UNREACHABLE();
3713 return -1;
3714 }
3715 }
3716
CheckPageFlag(const Register & object,const Register & scratch,int mask,Condition cc,Label * condition_met)3717 void MacroAssembler::CheckPageFlag(const Register& object,
3718 const Register& scratch, int mask,
3719 Condition cc, Label* condition_met) {
3720 And(scratch, object, ~Page::kPageAlignmentMask);
3721 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3722 if (cc == eq) {
3723 TestAndBranchIfAnySet(scratch, mask, condition_met);
3724 } else {
3725 TestAndBranchIfAllClear(scratch, mask, condition_met);
3726 }
3727 }
3728
CheckPageFlagSet(const Register & object,const Register & scratch,int mask,Label * if_any_set)3729 void MacroAssembler::CheckPageFlagSet(const Register& object,
3730 const Register& scratch,
3731 int mask,
3732 Label* if_any_set) {
3733 And(scratch, object, ~Page::kPageAlignmentMask);
3734 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3735 TestAndBranchIfAnySet(scratch, mask, if_any_set);
3736 }
3737
3738
CheckPageFlagClear(const Register & object,const Register & scratch,int mask,Label * if_all_clear)3739 void MacroAssembler::CheckPageFlagClear(const Register& object,
3740 const Register& scratch,
3741 int mask,
3742 Label* if_all_clear) {
3743 And(scratch, object, ~Page::kPageAlignmentMask);
3744 Ldr(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3745 TestAndBranchIfAllClear(scratch, mask, if_all_clear);
3746 }
3747
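// Illustrative sketch (not part of this file's build): what the CheckPageFlag*
// helpers above compute. A heap object's page header is found by masking the
// object address down to the page boundary; the flags word then sits at a
// fixed offset in the header. Parameter names here are ad hoc; the real mask
// and offset are Page::kPageAlignmentMask and MemoryChunk::kFlagsOffset.
static inline bool PageFlagsAnySetSketch(uintptr_t object_address,
                                         uintptr_t flags_mask,
                                         uintptr_t page_alignment_mask,
                                         size_t flags_offset) {
  uintptr_t page_start = object_address & ~page_alignment_mask;
  uintptr_t flags = *reinterpret_cast<uintptr_t*>(page_start + flags_offset);
  return (flags & flags_mask) != 0;
}
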
3748
RecordWriteField(Register object,int offset,Register value,Register scratch,LinkRegisterStatus lr_status,SaveFPRegsMode save_fp,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)3749 void MacroAssembler::RecordWriteField(
3750 Register object,
3751 int offset,
3752 Register value,
3753 Register scratch,
3754 LinkRegisterStatus lr_status,
3755 SaveFPRegsMode save_fp,
3756 RememberedSetAction remembered_set_action,
3757 SmiCheck smi_check,
3758 PointersToHereCheck pointers_to_here_check_for_value) {
3759 // First, check if a write barrier is even needed. The tests below
3760 // catch stores of Smis.
3761 Label done;
3762
3763 // Skip the barrier if writing a smi.
3764 if (smi_check == INLINE_SMI_CHECK) {
3765 JumpIfSmi(value, &done);
3766 }
3767
3768 // Although the object register is tagged, the offset is relative to the start
3769 // of the object, so offset must be a multiple of kPointerSize.
3770 DCHECK(IsAligned(offset, kPointerSize));
3771
3772 Add(scratch, object, offset - kHeapObjectTag);
3773 if (emit_debug_code()) {
3774 Label ok;
3775 Tst(scratch, (1 << kPointerSizeLog2) - 1);
3776 B(eq, &ok);
3777 Abort(kUnalignedCellInWriteBarrier);
3778 Bind(&ok);
3779 }
3780
3781 RecordWrite(object,
3782 scratch,
3783 value,
3784 lr_status,
3785 save_fp,
3786 remembered_set_action,
3787 OMIT_SMI_CHECK,
3788 pointers_to_here_check_for_value);
3789
3790 Bind(&done);
3791
3792 // Clobber clobbered input registers when running with the debug-code flag
3793 // turned on to provoke errors.
3794 if (emit_debug_code()) {
3795 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
3796 Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
3797 }
3798 }
3799
3800
3801 // Will clobber: object, map, dst.
3802 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
RecordWriteForMap(Register object,Register map,Register dst,LinkRegisterStatus lr_status,SaveFPRegsMode fp_mode)3803 void MacroAssembler::RecordWriteForMap(Register object,
3804 Register map,
3805 Register dst,
3806 LinkRegisterStatus lr_status,
3807 SaveFPRegsMode fp_mode) {
3808 ASM_LOCATION("MacroAssembler::RecordWrite");
3809 DCHECK(!AreAliased(object, map));
3810
3811 if (emit_debug_code()) {
3812 UseScratchRegisterScope temps(this);
3813 Register temp = temps.AcquireX();
3814
3815 CompareObjectMap(map, temp, isolate()->factory()->meta_map());
3816 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3817 }
3818
3819 if (!FLAG_incremental_marking) {
3820 return;
3821 }
3822
3823 if (emit_debug_code()) {
3824 UseScratchRegisterScope temps(this);
3825 Register temp = temps.AcquireX();
3826
3827 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
3828 Cmp(temp, map);
3829 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3830 }
3831
3832 // First, check if a write barrier is even needed. The tests below
3833 // catch stores of smis and stores into the young generation.
3834 Label done;
3835
3836 // A single check of the map page's "pointers to here are interesting" flag
3837 // suffices: it is only set during incremental collection, and then the from-
3838 // object page's "pointers from here are interesting" flag is guaranteed to be
3839 // set as well. This optimization relies on maps never being in new space.
3840 CheckPageFlagClear(map,
3841 map, // Used as scratch.
3842 MemoryChunk::kPointersToHereAreInterestingMask,
3843 &done);
3844
3845 // Record the actual write.
3846 if (lr_status == kLRHasNotBeenSaved) {
3847 Push(lr);
3848 }
3849 Add(dst, object, HeapObject::kMapOffset - kHeapObjectTag);
3850 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
3851 fp_mode);
3852 CallStub(&stub);
3853 if (lr_status == kLRHasNotBeenSaved) {
3854 Pop(lr);
3855 }
3856
3857 Bind(&done);
3858
3859 // Count number of write barriers in generated code.
3860 isolate()->counters()->write_barriers_static()->Increment();
3861 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
3862 dst);
3863
3864 // Clobber clobbered registers when running with the debug-code flag
3865 // turned on to provoke errors.
3866 if (emit_debug_code()) {
3867 Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
3868 Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
3869 }
3870 }
3871
3872
3873 // Will clobber: object, address, value.
3874 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
3875 //
3876 // The register 'object' contains a heap object pointer. The heap object tag is
3877 // shifted away.
RecordWrite(Register object,Register address,Register value,LinkRegisterStatus lr_status,SaveFPRegsMode fp_mode,RememberedSetAction remembered_set_action,SmiCheck smi_check,PointersToHereCheck pointers_to_here_check_for_value)3878 void MacroAssembler::RecordWrite(
3879 Register object,
3880 Register address,
3881 Register value,
3882 LinkRegisterStatus lr_status,
3883 SaveFPRegsMode fp_mode,
3884 RememberedSetAction remembered_set_action,
3885 SmiCheck smi_check,
3886 PointersToHereCheck pointers_to_here_check_for_value) {
3887 ASM_LOCATION("MacroAssembler::RecordWrite");
3888 DCHECK(!AreAliased(object, value));
3889
3890 if (emit_debug_code()) {
3891 UseScratchRegisterScope temps(this);
3892 Register temp = temps.AcquireX();
3893
3894 Ldr(temp, MemOperand(address));
3895 Cmp(temp, value);
3896 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
3897 }
3898
3899 // First, check if a write barrier is even needed. The tests below
3900 // catch stores of smis and stores into the young generation.
3901 Label done;
3902
3903 if (smi_check == INLINE_SMI_CHECK) {
3904 DCHECK_EQ(0, kSmiTag);
3905 JumpIfSmi(value, &done);
3906 }
3907
3908 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
3909 CheckPageFlagClear(value,
3910 value, // Used as scratch.
3911 MemoryChunk::kPointersToHereAreInterestingMask,
3912 &done);
3913 }
3914 CheckPageFlagClear(object,
3915 value, // Used as scratch.
3916 MemoryChunk::kPointersFromHereAreInterestingMask,
3917 &done);
3918
3919 // Record the actual write.
3920 if (lr_status == kLRHasNotBeenSaved) {
3921 Push(lr);
3922 }
3923 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
3924 fp_mode);
3925 CallStub(&stub);
3926 if (lr_status == kLRHasNotBeenSaved) {
3927 Pop(lr);
3928 }
3929
3930 Bind(&done);
3931
3932 // Count number of write barriers in generated code.
3933 isolate()->counters()->write_barriers_static()->Increment();
3934 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
3935 value);
3936
3937 // Clobber clobbered registers when running with the debug-code flag
3938 // turned on to provoke errors.
3939 if (emit_debug_code()) {
3940 Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
3941 Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
3942 }
3943 }
3944
3945
AssertHasValidColor(const Register & reg)3946 void MacroAssembler::AssertHasValidColor(const Register& reg) {
3947 if (emit_debug_code()) {
3948 // The bit sequence is backward. The first character in the string
3949 // represents the least significant bit.
3950 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3951
3952 Label color_is_valid;
3953 Tbnz(reg, 0, &color_is_valid);
3954 Tbz(reg, 1, &color_is_valid);
3955 Abort(kUnexpectedColorFound);
3956 Bind(&color_is_valid);
3957 }
3958 }
3959
3960
GetMarkBits(Register addr_reg,Register bitmap_reg,Register shift_reg)3961 void MacroAssembler::GetMarkBits(Register addr_reg,
3962 Register bitmap_reg,
3963 Register shift_reg) {
3964 DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
3965 DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
3966 // addr_reg is divided into fields:
3967 // |63 page base 20|19 high 8|7 shift 3|2 0|
3968 // 'high' gives the index of the cell holding color bits for the object.
3969 // 'shift' gives the offset in the cell for this object's color.
3970 const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3971 UseScratchRegisterScope temps(this);
3972 Register temp = temps.AcquireX();
3973 Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
3974 Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
3975 Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
3976 // bitmap_reg:
3977 // |63 page base 20|19 zeros 15|14 high 3|2 0|
3978 Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
3979 }
3980
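// Illustrative sketch (not part of this file's build): the address arithmetic
// performed by GetMarkBits above, written out in scalar C++. The constants
// mirror the field layout documented in the comment (8-byte pointers, 32 mark
// bits per cell, 20-bit page offsets); the real values come from Bitmap and
// Page, and the names here are ad hoc.
static inline void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell_base,
                                     unsigned* bit_in_cell) {
  const unsigned kPointerLog2 = 3;      // kPointerSizeLog2
  const unsigned kBitsPerCellLog2 = 5;  // 32 mark bits per cell.
  const unsigned kBytesPerCellLog2 = 2;
  const unsigned kPageSizeBits = 20;
  const uintptr_t kPageAlignMask = (uintptr_t{1} << kPageSizeBits) - 1;
  const unsigned shift_bits = kPointerLog2 + kBitsPerCellLog2;
  // Index of the bitmap cell holding this object's colour bits.
  uintptr_t cell_index =
      (addr >> shift_bits) & ((uintptr_t{1} << (kPageSizeBits - shift_bits)) - 1);
  // Page base plus the byte offset of the cell. The bitmap itself starts at
  // MemoryChunk::kHeaderSize, which the callers above add when loading.
  *cell_base = (addr & ~kPageAlignMask) + (cell_index << kBytesPerCellLog2);
  // Bit position of this object's colour within the cell.
  *bit_in_cell = static_cast<unsigned>(
      (addr >> kPointerLog2) & ((1u << kBitsPerCellLog2) - 1));
}
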
3981
HasColor(Register object,Register bitmap_scratch,Register shift_scratch,Label * has_color,int first_bit,int second_bit)3982 void MacroAssembler::HasColor(Register object,
3983 Register bitmap_scratch,
3984 Register shift_scratch,
3985 Label* has_color,
3986 int first_bit,
3987 int second_bit) {
3988 // See mark-compact.h for color definitions.
3989 DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
3990
3991 GetMarkBits(object, bitmap_scratch, shift_scratch);
3992 Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3993 // Shift the bitmap down to get the color of the object in bits [1:0].
3994 Lsr(bitmap_scratch, bitmap_scratch, shift_scratch);
3995
3996 AssertHasValidColor(bitmap_scratch);
3997
3998 // These bit sequences are backwards. The first character in the string
3999 // represents the least significant bit.
4000 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4001 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4002 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
4003
4004 // Check for the color.
4005 if (first_bit == 0) {
4006 // Checking for white.
4007 DCHECK(second_bit == 0);
4008 // We only need to test the first bit.
4009 Tbz(bitmap_scratch, 0, has_color);
4010 } else {
4011 Label other_color;
4012 // Checking for grey or black.
4013 Tbz(bitmap_scratch, 0, &other_color);
4014 if (second_bit == 0) {
4015 Tbz(bitmap_scratch, 1, has_color);
4016 } else {
4017 Tbnz(bitmap_scratch, 1, has_color);
4018 }
4019 Bind(&other_color);
4020 }
4021
4022 // Fall through if it does not have the right color.
4023 }
4024
4025
JumpIfBlack(Register object,Register scratch0,Register scratch1,Label * on_black)4026 void MacroAssembler::JumpIfBlack(Register object,
4027 Register scratch0,
4028 Register scratch1,
4029 Label* on_black) {
4030 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4031 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern.
4032 }
4033
JumpIfWhite(Register value,Register bitmap_scratch,Register shift_scratch,Register load_scratch,Register length_scratch,Label * value_is_white)4034 void MacroAssembler::JumpIfWhite(Register value, Register bitmap_scratch,
4035 Register shift_scratch, Register load_scratch,
4036 Register length_scratch,
4037 Label* value_is_white) {
4038 DCHECK(!AreAliased(
4039 value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
4040
4041 // These bit sequences are backwards. The first character in the string
4042 // represents the least significant bit.
4043 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
4044 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
4045 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
4046
4047 GetMarkBits(value, bitmap_scratch, shift_scratch);
4048 Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
4049 Lsr(load_scratch, load_scratch, shift_scratch);
4050
4051 AssertHasValidColor(load_scratch);
4052
4053 // If the value is black or grey we don't need to do anything.
4054 // Since both black and grey have a 1 in the first position and white does
4055 // not have a 1 there we only need to check one bit.
4056 Tbz(load_scratch, 0, value_is_white);
4057 }
4058
4059
Assert(Condition cond,BailoutReason reason)4060 void MacroAssembler::Assert(Condition cond, BailoutReason reason) {
4061 if (emit_debug_code()) {
4062 Check(cond, reason);
4063 }
4064 }
4065
4066
4067
AssertRegisterIsClear(Register reg,BailoutReason reason)4068 void MacroAssembler::AssertRegisterIsClear(Register reg, BailoutReason reason) {
4069 if (emit_debug_code()) {
4070 CheckRegisterIsClear(reg, reason);
4071 }
4072 }
4073
4074
AssertRegisterIsRoot(Register reg,Heap::RootListIndex index,BailoutReason reason)4075 void MacroAssembler::AssertRegisterIsRoot(Register reg,
4076 Heap::RootListIndex index,
4077 BailoutReason reason) {
4078 if (emit_debug_code()) {
4079 CompareRoot(reg, index);
4080 Check(eq, reason);
4081 }
4082 }
4083
4084
AssertFastElements(Register elements)4085 void MacroAssembler::AssertFastElements(Register elements) {
4086 if (emit_debug_code()) {
4087 UseScratchRegisterScope temps(this);
4088 Register temp = temps.AcquireX();
4089 Label ok;
4090 Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
4091 JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
4092 JumpIfRoot(temp, Heap::kFixedDoubleArrayMapRootIndex, &ok);
4093 JumpIfRoot(temp, Heap::kFixedCOWArrayMapRootIndex, &ok);
4094 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4095 Bind(&ok);
4096 }
4097 }
4098
4099
AssertIsString(const Register & object)4100 void MacroAssembler::AssertIsString(const Register& object) {
4101 if (emit_debug_code()) {
4102 UseScratchRegisterScope temps(this);
4103 Register temp = temps.AcquireX();
4104 STATIC_ASSERT(kSmiTag == 0);
4105 Tst(object, kSmiTagMask);
4106 Check(ne, kOperandIsNotAString);
4107 Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
4108 CompareInstanceType(temp, temp, FIRST_NONSTRING_TYPE);
4109 Check(lo, kOperandIsNotAString);
4110 }
4111 }
4112
4113
Check(Condition cond,BailoutReason reason)4114 void MacroAssembler::Check(Condition cond, BailoutReason reason) {
4115 Label ok;
4116 B(cond, &ok);
4117 Abort(reason);
4118 // Will not return here.
4119 Bind(&ok);
4120 }
4121
4122
CheckRegisterIsClear(Register reg,BailoutReason reason)4123 void MacroAssembler::CheckRegisterIsClear(Register reg, BailoutReason reason) {
4124 Label ok;
4125 Cbz(reg, &ok);
4126 Abort(reason);
4127 // Will not return here.
4128 Bind(&ok);
4129 }
4130
4131
Abort(BailoutReason reason)4132 void MacroAssembler::Abort(BailoutReason reason) {
4133 #ifdef DEBUG
4134 RecordComment("Abort message: ");
4135 RecordComment(GetBailoutReason(reason));
4136
4137 if (FLAG_trap_on_abort) {
4138 Brk(0);
4139 return;
4140 }
4141 #endif
4142
4143 // Abort is used in some contexts where csp is the stack pointer. In order to
4144 // simplify the CallRuntime code, make sure that jssp is the stack pointer.
4145 // There is no risk of register corruption here because Abort doesn't return.
4146 Register old_stack_pointer = StackPointer();
4147 SetStackPointer(jssp);
4148 Mov(jssp, old_stack_pointer);
4149
4150 // We need some scratch registers for the MacroAssembler, so make sure we have
4151 // some. This is safe here because Abort never returns.
4152 RegList old_tmp_list = TmpList()->list();
4153 TmpList()->Combine(MacroAssembler::DefaultTmpList());
4154
4155 if (use_real_aborts()) {
4156 // Avoid infinite recursion; Push contains some assertions that use Abort.
4157 NoUseRealAbortsScope no_real_aborts(this);
4158
4159 // Check if Abort() has already been initialized.
4160 DCHECK(isolate()->builtins()->Abort()->IsHeapObject());
4161
4162 Move(x1, Smi::FromInt(static_cast<int>(reason)));
4163
4164 if (!has_frame_) {
4165 // We don't actually want to generate a pile of code for this, so just
4166 // claim there is a stack frame, without generating one.
4167 FrameScope scope(this, StackFrame::NONE);
4168 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
4169 } else {
4170 Call(isolate()->builtins()->Abort(), RelocInfo::CODE_TARGET);
4171 }
4172 } else {
4173 // Load the string to pass to Printf.
4174 Label msg_address;
4175 Adr(x0, &msg_address);
4176
4177 // Call Printf directly to report the error.
4178 CallPrintf();
4179
4180 // We need a way to stop execution on both the simulator and real hardware,
4181 // and Unreachable() is the best option.
4182 Unreachable();
4183
4184 // Emit the message string directly in the instruction stream.
4185 {
4186 BlockPoolsScope scope(this);
4187 Bind(&msg_address);
4188 EmitStringData(GetBailoutReason(reason));
4189 }
4190 }
4191
4192 SetStackPointer(old_stack_pointer);
4193 TmpList()->set_list(old_tmp_list);
4194 }
4195
LoadNativeContextSlot(int index,Register dst)4196 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
4197 Ldr(dst, NativeContextMemOperand());
4198 Ldr(dst, ContextMemOperand(dst, index));
4199 }
4200
4201
LoadGlobalFunctionInitialMap(Register function,Register map,Register scratch)4202 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4203 Register map,
4204 Register scratch) {
4205 // Load the initial map. The global functions all have initial maps.
4206 Ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4207 if (emit_debug_code()) {
4208 Label ok, fail;
4209 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4210 B(&ok);
4211 Bind(&fail);
4212 Abort(kGlobalFunctionsMustHaveInitialMap);
4213 Bind(&ok);
4214 }
4215 }
4216
4217
4218 // This is the main Printf implementation. All other Printf variants call
4219 // PrintfNoPreserve after setting up one or more PreserveRegisterScopes.
PrintfNoPreserve(const char * format,const CPURegister & arg0,const CPURegister & arg1,const CPURegister & arg2,const CPURegister & arg3)4220 void MacroAssembler::PrintfNoPreserve(const char * format,
4221 const CPURegister& arg0,
4222 const CPURegister& arg1,
4223 const CPURegister& arg2,
4224 const CPURegister& arg3) {
4225 // We cannot handle a caller-saved stack pointer. It doesn't make much sense
4226 // in most cases anyway, so this restriction shouldn't be too serious.
4227 DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
4228
4229 // The provided arguments, and their proper procedure-call standard registers.
4230 CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
4231 CPURegister pcs[kPrintfMaxArgCount] = {NoReg, NoReg, NoReg, NoReg};
4232
4233 int arg_count = kPrintfMaxArgCount;
4234
4235 // The PCS varargs registers for printf. Note that x0 is used for the printf
4236 // format string.
4237 static const CPURegList kPCSVarargs =
4238 CPURegList(CPURegister::kRegister, kXRegSizeInBits, 1, arg_count);
4239 static const CPURegList kPCSVarargsFP =
4240 CPURegList(CPURegister::kFPRegister, kDRegSizeInBits, 0, arg_count - 1);
4241
4242 // We can use caller-saved registers as scratch values, except for the
4243 // arguments and the PCS registers where they might need to go.
4244 CPURegList tmp_list = kCallerSaved;
4245 tmp_list.Remove(x0); // Used to pass the format string.
4246 tmp_list.Remove(kPCSVarargs);
4247 tmp_list.Remove(arg0, arg1, arg2, arg3);
4248
4249 CPURegList fp_tmp_list = kCallerSavedFP;
4250 fp_tmp_list.Remove(kPCSVarargsFP);
4251 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4252
4253 // Override the MacroAssembler's scratch register list. The lists will be
4254 // reset automatically at the end of the UseScratchRegisterScope.
4255 UseScratchRegisterScope temps(this);
4256 TmpList()->set_list(tmp_list.list());
4257 FPTmpList()->set_list(fp_tmp_list.list());
4258
4259 // Copies of the printf vararg registers that we can pop from.
4260 CPURegList pcs_varargs = kPCSVarargs;
4261 CPURegList pcs_varargs_fp = kPCSVarargsFP;
4262
4263 // Place the arguments. There are lots of clever tricks and optimizations we
4264 // could use here, but Printf is a debug tool so instead we just try to keep
4265 // it simple: Move each input that isn't already in the right place to a
4266 // scratch register, then move everything back.
4267 for (unsigned i = 0; i < kPrintfMaxArgCount; i++) {
4268 // Work out the proper PCS register for this argument.
4269 if (args[i].IsRegister()) {
4270 pcs[i] = pcs_varargs.PopLowestIndex().X();
4271 // We might only need a W register here. We need to know the size of the
4272 // argument so we can properly encode it for the simulator call.
4273 if (args[i].Is32Bits()) pcs[i] = pcs[i].W();
4274 } else if (args[i].IsFPRegister()) {
4275 // In C, floats are always cast to doubles for varargs calls.
4276 pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
4277 } else {
4278 DCHECK(args[i].IsNone());
4279 arg_count = i;
4280 break;
4281 }
4282
4283 // If the argument is already in the right place, leave it where it is.
4284 if (args[i].Aliases(pcs[i])) continue;
4285
4286 // Otherwise, if the argument is in a PCS argument register, allocate an
4287 // appropriate scratch register and then move it out of the way.
4288 if (kPCSVarargs.IncludesAliasOf(args[i]) ||
4289 kPCSVarargsFP.IncludesAliasOf(args[i])) {
4290 if (args[i].IsRegister()) {
4291 Register old_arg = Register(args[i]);
4292 Register new_arg = temps.AcquireSameSizeAs(old_arg);
4293 Mov(new_arg, old_arg);
4294 args[i] = new_arg;
4295 } else {
4296 FPRegister old_arg = FPRegister(args[i]);
4297 FPRegister new_arg = temps.AcquireSameSizeAs(old_arg);
4298 Fmov(new_arg, old_arg);
4299 args[i] = new_arg;
4300 }
4301 }
4302 }
4303
4304 // Do a second pass to move values into their final positions and perform any
4305 // conversions that may be required.
4306 for (int i = 0; i < arg_count; i++) {
4307 DCHECK(pcs[i].type() == args[i].type());
4308 if (pcs[i].IsRegister()) {
4309 Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
4310 } else {
4311 DCHECK(pcs[i].IsFPRegister());
4312 if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
4313 Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
4314 } else {
4315 Fcvt(FPRegister(pcs[i]), FPRegister(args[i]));
4316 }
4317 }
4318 }
4319
4320 // Load the format string into x0, as per the procedure-call standard.
4321 //
4322 // To make the code as portable as possible, the format string is encoded
4323 // directly in the instruction stream. It might be cleaner to encode it in a
4324 // literal pool, but since Printf is usually used for debugging, it is
4325 // beneficial for it to be minimally dependent on other features.
4326 Label format_address;
4327 Adr(x0, &format_address);
4328
4329 // Emit the format string directly in the instruction stream.
4330 { BlockPoolsScope scope(this);
4331 Label after_data;
4332 B(&after_data);
4333 Bind(&format_address);
4334 EmitStringData(format);
4335 Unreachable();
4336 Bind(&after_data);
4337 }
4338
4339 // We don't pass any arguments on the stack, but we still need to align the C
4340 // stack pointer to a 16-byte boundary for PCS compliance.
4341 if (!csp.Is(StackPointer())) {
4342 Bic(csp, StackPointer(), 0xf);
4343 }
4344
4345 CallPrintf(arg_count, pcs);
4346 }
4347
4348
CallPrintf(int arg_count,const CPURegister * args)4349 void MacroAssembler::CallPrintf(int arg_count, const CPURegister * args) {
4350 // A call to printf needs special handling for the simulator, since the system
4351 // printf function will use a different instruction set and the procedure-call
4352 // standard will not be compatible.
4353 #ifdef USE_SIMULATOR
4354 { InstructionAccurateScope scope(this, kPrintfLength / kInstructionSize);
4355 hlt(kImmExceptionIsPrintf);
4356 dc32(arg_count); // kPrintfArgCountOffset
4357
4358 // Determine the argument pattern.
4359 uint32_t arg_pattern_list = 0;
4360 for (int i = 0; i < arg_count; i++) {
4361 uint32_t arg_pattern;
4362 if (args[i].IsRegister()) {
4363 arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
4364 } else {
4365 DCHECK(args[i].Is64Bits());
4366 arg_pattern = kPrintfArgD;
4367 }
4368 DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
4369 arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
4370 }
4371 dc32(arg_pattern_list); // kPrintfArgPatternListOffset
4372 }
4373 #else
4374 Call(FUNCTION_ADDR(printf), RelocInfo::EXTERNAL_REFERENCE);
4375 #endif
4376 }
4377
4378
Printf(const char * format,CPURegister arg0,CPURegister arg1,CPURegister arg2,CPURegister arg3)4379 void MacroAssembler::Printf(const char * format,
4380 CPURegister arg0,
4381 CPURegister arg1,
4382 CPURegister arg2,
4383 CPURegister arg3) {
4384 // We can only print sp if it is the current stack pointer.
4385 if (!csp.Is(StackPointer())) {
4386 DCHECK(!csp.Aliases(arg0));
4387 DCHECK(!csp.Aliases(arg1));
4388 DCHECK(!csp.Aliases(arg2));
4389 DCHECK(!csp.Aliases(arg3));
4390 }
4391
4392 // Printf is expected to preserve all registers, so make sure that none are
4393 // available as scratch registers until we've preserved them.
4394 RegList old_tmp_list = TmpList()->list();
4395 RegList old_fp_tmp_list = FPTmpList()->list();
4396 TmpList()->set_list(0);
4397 FPTmpList()->set_list(0);
4398
4399 // Preserve all caller-saved registers as well as NZCV.
4400 // If csp is the stack pointer, PushCPURegList asserts that the size of each
4401 // list is a multiple of 16 bytes.
4402 PushCPURegList(kCallerSaved);
4403 PushCPURegList(kCallerSavedFP);
4404
4405 // We can use caller-saved registers as scratch values (except for argN).
4406 CPURegList tmp_list = kCallerSaved;
4407 CPURegList fp_tmp_list = kCallerSavedFP;
4408 tmp_list.Remove(arg0, arg1, arg2, arg3);
4409 fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
4410 TmpList()->set_list(tmp_list.list());
4411 FPTmpList()->set_list(fp_tmp_list.list());
4412
4413 { UseScratchRegisterScope temps(this);
4414 // If any of the arguments are the current stack pointer, allocate a new
4415 // register for them, and adjust the value to compensate for pushing the
4416 // caller-saved registers.
4417 bool arg0_sp = StackPointer().Aliases(arg0);
4418 bool arg1_sp = StackPointer().Aliases(arg1);
4419 bool arg2_sp = StackPointer().Aliases(arg2);
4420 bool arg3_sp = StackPointer().Aliases(arg3);
4421 if (arg0_sp || arg1_sp || arg2_sp || arg3_sp) {
4422 // Allocate a register to hold the original stack pointer value, to pass
4423 // to PrintfNoPreserve as an argument.
4424 Register arg_sp = temps.AcquireX();
4425 Add(arg_sp, StackPointer(),
4426 kCallerSaved.TotalSizeInBytes() + kCallerSavedFP.TotalSizeInBytes());
4427 if (arg0_sp) arg0 = Register::Create(arg_sp.code(), arg0.SizeInBits());
4428 if (arg1_sp) arg1 = Register::Create(arg_sp.code(), arg1.SizeInBits());
4429 if (arg2_sp) arg2 = Register::Create(arg_sp.code(), arg2.SizeInBits());
4430 if (arg3_sp) arg3 = Register::Create(arg_sp.code(), arg3.SizeInBits());
4431 }
4432
4433 // Preserve NZCV.
4434 { UseScratchRegisterScope temps(this);
4435 Register tmp = temps.AcquireX();
4436 Mrs(tmp, NZCV);
4437 Push(tmp, xzr);
4438 }
4439
4440 PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
4441
4442 // Restore NZCV.
4443 { UseScratchRegisterScope temps(this);
4444 Register tmp = temps.AcquireX();
4445 Pop(xzr, tmp);
4446 Msr(NZCV, tmp);
4447 }
4448 }
4449
4450 PopCPURegList(kCallerSavedFP);
4451 PopCPURegList(kCallerSaved);
4452
4453 TmpList()->set_list(old_tmp_list);
4454 FPTmpList()->set_list(old_fp_tmp_list);
4455 }
4456
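// Illustrative usage (not code from this file): Printf preserves all registers
// and accepts up to four CPURegister arguments, so debugging output can be
// added without disturbing generated code, e.g. something along the lines of
//
//   __ Printf("x0: %" PRIx64 ", d0: %g\n", x0, d0);
//
// The exact format specifiers are an assumption here: X registers are printed
// with 64-bit integer specifiers and D registers as doubles, matching the
// argument patterns encoded for the simulator in CallPrintf below.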
4457
EmitFrameSetupForCodeAgePatching()4458 void MacroAssembler::EmitFrameSetupForCodeAgePatching() {
4459 // TODO(jbramley): Other architectures use the internal memcpy to copy the
4460 // sequence. If this is a performance bottleneck, we should consider caching
4461 // the sequence and copying it in the same way.
4462 InstructionAccurateScope scope(this,
4463 kNoCodeAgeSequenceLength / kInstructionSize);
4464 DCHECK(jssp.Is(StackPointer()));
4465 EmitFrameSetupForCodeAgePatching(this);
4466 }
4467
4468
4469
EmitCodeAgeSequence(Code * stub)4470 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
4471 InstructionAccurateScope scope(this,
4472 kNoCodeAgeSequenceLength / kInstructionSize);
4473 DCHECK(jssp.Is(StackPointer()));
4474 EmitCodeAgeSequence(this, stub);
4475 }
4476
4477
4478 #undef __
4479 #define __ assm->
4480
4481
EmitFrameSetupForCodeAgePatching(Assembler * assm)4482 void MacroAssembler::EmitFrameSetupForCodeAgePatching(Assembler * assm) {
4483 Label start;
4484 __ bind(&start);
4485
4486 // We can do this sequence using four instructions, but the code ageing
4487 // sequence that patches it needs five, so we use the extra space to try to
4488 // simplify some addressing modes and remove some dependencies (compared to
4489 // using two stp instructions with write-back).
4490 __ sub(jssp, jssp, 4 * kXRegSize);
4491 __ sub(csp, csp, 4 * kXRegSize);
4492 __ stp(x1, cp, MemOperand(jssp, 0 * kXRegSize));
4493 __ stp(fp, lr, MemOperand(jssp, 2 * kXRegSize));
4494 __ add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
4495
4496 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
4497 }
4498
4499
EmitCodeAgeSequence(Assembler * assm,Code * stub)4500 void MacroAssembler::EmitCodeAgeSequence(Assembler * assm,
4501 Code * stub) {
4502 Label start;
4503 __ bind(&start);
4504 // When the stub is called, the sequence is replaced with the young sequence
4505 // (as in EmitFrameSetupForCodeAgePatching). After the code is replaced, the
4506 // stub jumps to &start, stored in x0. The young sequence does not call the
4507 // stub so there is no infinite loop here.
4508 //
4509 // A branch (br) is used rather than a call (blr) because this code replaces
4510 // the frame setup code that would normally preserve lr.
4511 __ ldr_pcrel(ip0, kCodeAgeStubEntryOffset >> kLoadLiteralScaleLog2);
4512 __ adr(x0, &start);
4513 __ br(ip0);
4514 // IsCodeAgeSequence in codegen-arm64.cc assumes that the code generated up
4515 // until now (kCodeAgeStubEntryOffset) is the same for all code age sequences.
4516 __ AssertSizeOfCodeGeneratedSince(&start, kCodeAgeStubEntryOffset);
4517 if (stub) {
4518 __ dc64(reinterpret_cast<uint64_t>(stub->instruction_start()));
4519 __ AssertSizeOfCodeGeneratedSince(&start, kNoCodeAgeSequenceLength);
4520 }
4521 }
4522
4523
IsYoungSequence(Isolate * isolate,byte * sequence)4524 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
4525 bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
4526 DCHECK(is_young ||
4527 isolate->code_aging_helper()->IsOld(sequence));
4528 return is_young;
4529 }
4530
4531
TruncatingDiv(Register result,Register dividend,int32_t divisor)4532 void MacroAssembler::TruncatingDiv(Register result,
4533 Register dividend,
4534 int32_t divisor) {
4535 DCHECK(!AreAliased(result, dividend));
4536 DCHECK(result.Is32Bits() && dividend.Is32Bits());
4537 base::MagicNumbersForDivision<uint32_t> mag =
4538 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
4539 Mov(result, mag.multiplier);
4540 Smull(result.X(), dividend, result);
4541 Asr(result.X(), result.X(), 32);
4542 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
4543 if (divisor > 0 && neg) Add(result, result, dividend);
4544 if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
4545 if (mag.shift > 0) Asr(result, result, mag.shift);
4546 Add(result, result, Operand(dividend, LSR, 31));
4547 }
4548
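// Illustrative sketch (not part of this file's build): the scalar computation
// behind TruncatingDiv above. Given the magic multiplier and shift produced by
// base::SignedDivisionByConstant, a truncating signed division by a constant
// becomes a 64-bit multiply plus shifts and corrections. Names are ad hoc.
static inline int32_t TruncatingDivSketch(int32_t dividend, uint32_t multiplier,
                                          int shift, int32_t divisor) {
  int64_t product = static_cast<int64_t>(dividend) *
                    static_cast<int64_t>(static_cast<int32_t>(multiplier));
  int32_t quotient = static_cast<int32_t>(product >> 32);  // Smull + Asr #32.
  bool multiplier_is_negative = (multiplier & 0x80000000u) != 0;
  if (divisor > 0 && multiplier_is_negative) quotient += dividend;
  if (divisor < 0 && !multiplier_is_negative && multiplier != 0) {
    quotient -= dividend;
  }
  if (shift > 0) quotient >>= shift;  // Arithmetic shift right.
  // Add 1 when the dividend is negative (Operand(dividend, LSR, 31) above).
  quotient += static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
  return quotient;
}
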
4549
4550 #undef __
4551
4552
~UseScratchRegisterScope()4553 UseScratchRegisterScope::~UseScratchRegisterScope() {
4554 available_->set_list(old_available_);
4555 availablefp_->set_list(old_availablefp_);
4556 }
4557
4558
AcquireSameSizeAs(const Register & reg)4559 Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
4560 int code = AcquireNextAvailable(available_).code();
4561 return Register::Create(code, reg.SizeInBits());
4562 }
4563
4564
AcquireSameSizeAs(const FPRegister & reg)4565 FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
4566 int code = AcquireNextAvailable(availablefp_).code();
4567 return FPRegister::Create(code, reg.SizeInBits());
4568 }
4569
4570
AcquireNextAvailable(CPURegList * available)4571 CPURegister UseScratchRegisterScope::AcquireNextAvailable(
4572 CPURegList* available) {
4573 CHECK(!available->IsEmpty());
4574 CPURegister result = available->PopLowestIndex();
4575 DCHECK(!AreAliased(result, xzr, csp));
4576 return result;
4577 }
4578
4579
UnsafeAcquire(CPURegList * available,const CPURegister & reg)4580 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
4581 const CPURegister& reg) {
4582 DCHECK(available->IncludesAliasOf(reg));
4583 available->Remove(reg);
4584 return reg;
4585 }
4586
4587
4588 #define __ masm->
4589
4590
Emit(MacroAssembler * masm,const Register & reg,const Label * smi_check)4591 void InlineSmiCheckInfo::Emit(MacroAssembler* masm, const Register& reg,
4592 const Label* smi_check) {
4593 Assembler::BlockPoolsScope scope(masm);
4594 if (reg.IsValid()) {
4595 DCHECK(smi_check->is_bound());
4596 DCHECK(reg.Is64Bits());
4597
4598 // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
4599 // 'check' in the other bits. The possible offset is limited in that we
4600 // use BitField to pack the data, and the underlying data type is a
4601 // uint32_t.
4602 uint32_t delta =
4603 static_cast<uint32_t>(__ InstructionsGeneratedSince(smi_check));
4604 __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
4605 } else {
4606 DCHECK(!smi_check->is_bound());
4607
4608 // An offset of 0 indicates that there is no patch site.
4609 __ InlineData(0);
4610 }
4611 }
4612
InlineSmiCheckInfo(Address info)4613 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
4614 : reg_(NoReg), smi_check_delta_(0), smi_check_(NULL) {
4615 InstructionSequence* inline_data = InstructionSequence::At(info);
4616 DCHECK(inline_data->IsInlineData());
4617 if (inline_data->IsInlineData()) {
4618 uint64_t payload = inline_data->InlineData();
4619 // We use BitField to decode the payload, and BitField can only handle
4620 // 32-bit values.
4621 DCHECK(is_uint32(payload));
4622 if (payload != 0) {
4623 uint32_t payload32 = static_cast<uint32_t>(payload);
4624 int reg_code = RegisterBits::decode(payload32);
4625 reg_ = Register::XRegFromCode(reg_code);
4626 smi_check_delta_ = DeltaBits::decode(payload32);
4627 DCHECK_NE(0, smi_check_delta_);
4628 smi_check_ = inline_data->preceding(smi_check_delta_);
4629 }
4630 }
4631 }
4632
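// Illustrative sketch (not part of this file's build): the payload layout used
// by InlineSmiCheckInfo, as described in Emit() above. The register code sits
// in the lowest 5 bits and the instruction delta in the bits above it. Helper
// names are ad hoc; the real code uses BitField<> for this packing.
static inline uint32_t PackSmiCheckInfoSketch(unsigned reg_code,
                                              uint32_t delta) {
  return (reg_code & 0x1fu) | (delta << 5);
}

static inline void UnpackSmiCheckInfoSketch(uint32_t payload,
                                            unsigned* reg_code,
                                            uint32_t* delta) {
  *reg_code = payload & 0x1fu;
  *delta = payload >> 5;
}
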
4633
4634 #undef __
4635
4636
4637 } // namespace internal
4638 } // namespace v8
4639
4640 #endif // V8_TARGET_ARCH_ARM64
4641