1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <assert.h> // For assert
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7
8 #if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
9
10 #include "src/base/bits.h"
11 #include "src/base/division-by-constant.h"
12 #include "src/codegen/callable.h"
13 #include "src/codegen/code-factory.h"
14 #include "src/codegen/external-reference-table.h"
15 #include "src/codegen/macro-assembler.h"
16 #include "src/codegen/register-configuration.h"
17 #include "src/debug/debug.h"
18 #include "src/execution/frames-inl.h"
19 #include "src/heap/memory-chunk.h"
20 #include "src/init/bootstrapper.h"
21 #include "src/logging/counters.h"
22 #include "src/runtime/runtime.h"
23 #include "src/snapshot/embedded/embedded-data.h"
24 #include "src/snapshot/snapshot.h"
25 #include "src/wasm/wasm-code-manager.h"
26
27 // Satisfy cpplint check, but don't include platform-specific header. It is
28 // included recursively via macro-assembler.h.
29 #if 0
30 #include "src/codegen/ppc/macro-assembler-ppc.h"
31 #endif
32
33 namespace v8 {
34 namespace internal {
35
36 int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
37 Register exclusion1,
38 Register exclusion2,
39 Register exclusion3) const {
40 int bytes = 0;
41 RegList exclusions = 0;
42 if (exclusion1 != no_reg) {
43 exclusions |= exclusion1.bit();
44 if (exclusion2 != no_reg) {
45 exclusions |= exclusion2.bit();
46 if (exclusion3 != no_reg) {
47 exclusions |= exclusion3.bit();
48 }
49 }
50 }
51
52 RegList list = kJSCallerSaved & ~exclusions;
53 bytes += NumRegs(list) * kSystemPointerSize;
54
55 if (fp_mode == kSaveFPRegs) {
56 bytes += kNumCallerSavedDoubles * kDoubleSize;
57 }
58
59 return bytes;
60 }
61
62 int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
63 Register exclusion2, Register exclusion3) {
64 int bytes = 0;
65 RegList exclusions = 0;
66 if (exclusion1 != no_reg) {
67 exclusions |= exclusion1.bit();
68 if (exclusion2 != no_reg) {
69 exclusions |= exclusion2.bit();
70 if (exclusion3 != no_reg) {
71 exclusions |= exclusion3.bit();
72 }
73 }
74 }
75
76 RegList list = kJSCallerSaved & ~exclusions;
77 MultiPush(list);
78 bytes += NumRegs(list) * kSystemPointerSize;
79
80 if (fp_mode == kSaveFPRegs) {
81 MultiPushDoubles(kCallerSavedDoubles);
82 bytes += kNumCallerSavedDoubles * kDoubleSize;
83 }
84
85 return bytes;
86 }
87
88 int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
89 Register exclusion2, Register exclusion3) {
90 int bytes = 0;
91 if (fp_mode == kSaveFPRegs) {
92 MultiPopDoubles(kCallerSavedDoubles);
93 bytes += kNumCallerSavedDoubles * kDoubleSize;
94 }
95
96 RegList exclusions = 0;
97 if (exclusion1 != no_reg) {
98 exclusions |= exclusion1.bit();
99 if (exclusion2 != no_reg) {
100 exclusions |= exclusion2.bit();
101 if (exclusion3 != no_reg) {
102 exclusions |= exclusion3.bit();
103 }
104 }
105 }
106
107 RegList list = kJSCallerSaved & ~exclusions;
108 MultiPop(list);
109 bytes += NumRegs(list) * kSystemPointerSize;
110
111 return bytes;
112 }
113
114 void TurboAssembler::Jump(Register target) {
115 mtctr(target);
116 bctr();
117 }
118
119 void TurboAssembler::LoadFromConstantsTable(Register destination,
120 int constant_index) {
121 DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
122
123 DCHECK_NE(destination, r0);
124 LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
125 LoadTaggedPointerField(
126 destination,
127 FieldMemOperand(destination,
128 FixedArray::OffsetOfElementAt(constant_index)),
129 r0);
130 }
131
132 void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
133 LoadP(destination, MemOperand(kRootRegister, offset), r0);
134 }
135
136 void TurboAssembler::LoadRootRegisterOffset(Register destination,
137 intptr_t offset) {
138 if (offset == 0) {
139 mr(destination, kRootRegister);
140 } else if (is_int16(offset)) {
141 addi(destination, kRootRegister, Operand(offset));
142 } else {
143 mov(destination, Operand(offset));
144 add(destination, kRootRegister, destination);
145 }
146 }
147
148 void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
149 Condition cond, CRegister cr) {
150 Label skip;
151
152 if (cond != al) b(NegateCondition(cond), &skip, cr);
153
154 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
155
156 mov(ip, Operand(target, rmode));
157 mtctr(ip);
158 bctr();
159
160 bind(&skip);
161 }
162
163 void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
164 CRegister cr) {
165 DCHECK(!RelocInfo::IsCodeTarget(rmode));
166 Jump(static_cast<intptr_t>(target), rmode, cond, cr);
167 }
168
169 void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
170 Condition cond, CRegister cr) {
171 DCHECK(RelocInfo::IsCodeTarget(rmode));
172 DCHECK_IMPLIES(options().isolate_independent_code,
173 Builtins::IsIsolateIndependentBuiltin(*code));
174
175 int builtin_index = Builtins::kNoBuiltinId;
176 bool target_is_builtin =
177 isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
178
179 if (root_array_available_ && options().isolate_independent_code) {
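    // With isolate-independent code the Code object cannot be embedded directly;
    // load the builtin's entry point from the isolate's builtin entry table instead.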
180 Label skip;
181 Register scratch = ip;
182 int offset = code->builtin_index() * kSystemPointerSize +
183 IsolateData::builtin_entry_table_offset();
184 LoadP(scratch, MemOperand(kRootRegister, offset), r0);
185 if (cond != al) b(NegateCondition(cond), &skip, cr);
186 Jump(scratch);
187 bind(&skip);
188 return;
189 } else if (options().inline_offheap_trampolines && target_is_builtin) {
190 // Inline the trampoline.
191 Label skip;
192 RecordCommentForOffHeapTrampoline(builtin_index);
193 EmbeddedData d = EmbeddedData::FromBlob();
194 Address entry = d.InstructionStartOfBuiltin(builtin_index);
195 // Use ip directly instead of using UseScratchRegisterScope, as we do
196 // not preserve scratch registers across calls.
197 mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
198 if (cond != al) b(NegateCondition(cond), &skip, cr);
199 Jump(ip);
200 bind(&skip);
201 return;
202 }
203 int32_t target_index = AddCodeTarget(code);
204 Jump(static_cast<intptr_t>(target_index), rmode, cond, cr);
205 }
206
207 void TurboAssembler::Jump(const ExternalReference& reference) {
208 UseScratchRegisterScope temps(this);
209 Register scratch = temps.Acquire();
210 Move(scratch, reference);
211 if (ABI_USES_FUNCTION_DESCRIPTORS) {
212 // AIX uses a function descriptor. When calling C code be
213 // aware of this descriptor and pick up values from it.
214 LoadP(ToRegister(ABI_TOC_REGISTER),
215 MemOperand(scratch, kSystemPointerSize));
216 LoadP(scratch, MemOperand(scratch, 0));
217 }
218 Jump(scratch);
219 }
220
221 void TurboAssembler::Call(Register target) {
222 BlockTrampolinePoolScope block_trampoline_pool(this);
223   // branch via the count register and set the LK bit to record the return point
224 mtctr(target);
225 bctrl();
226 }
227
228 void MacroAssembler::CallJSEntry(Register target) {
229 CHECK(target == r5);
230 Call(target);
231 }
232
233 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
234 RelocInfo::Mode rmode,
235 Condition cond) {
236 return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
237 }
238
239 void TurboAssembler::Call(Address target, RelocInfo::Mode rmode,
240 Condition cond) {
241 BlockTrampolinePoolScope block_trampoline_pool(this);
242 DCHECK(cond == al);
243
244   // This can likely be optimized to use bc() with a 24-bit relative branch offset:
245 //
246 // RecordRelocInfo(x.rmode_, x.immediate);
247 // bc( BA, .... offset, LKset);
248 //
249
250 mov(ip, Operand(target, rmode));
251 mtctr(ip);
252 bctrl();
253 }
254
255 void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
256 Condition cond) {
257 BlockTrampolinePoolScope block_trampoline_pool(this);
258 DCHECK(RelocInfo::IsCodeTarget(rmode));
259 DCHECK_IMPLIES(options().isolate_independent_code,
260 Builtins::IsIsolateIndependentBuiltin(*code));
261 DCHECK_IMPLIES(options().use_pc_relative_calls_and_jumps,
262 Builtins::IsIsolateIndependentBuiltin(*code));
263
264 int builtin_index = Builtins::kNoBuiltinId;
265 bool target_is_builtin =
266 isolate()->builtins()->IsBuiltinHandle(code, &builtin_index);
267
268 if (root_array_available_ && options().isolate_independent_code) {
269 Label skip;
270 int offset = code->builtin_index() * kSystemPointerSize +
271 IsolateData::builtin_entry_table_offset();
272 LoadP(ip, MemOperand(kRootRegister, offset));
273 if (cond != al) b(NegateCondition(cond), &skip);
274 Call(ip);
275 bind(&skip);
276 return;
277 } else if (options().inline_offheap_trampolines && target_is_builtin) {
278 // Inline the trampoline.
279 RecordCommentForOffHeapTrampoline(builtin_index);
280 EmbeddedData d = EmbeddedData::FromBlob();
281 Address entry = d.InstructionStartOfBuiltin(builtin_index);
282 // Use ip directly instead of using UseScratchRegisterScope, as we do
283 // not preserve scratch registers across calls.
284 mov(ip, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
285 Label skip;
286 if (cond != al) b(NegateCondition(cond), &skip);
287 Call(ip);
288 bind(&skip);
289 return;
290 }
291 DCHECK(code->IsExecutable());
292 int32_t target_index = AddCodeTarget(code);
293 Call(static_cast<Address>(target_index), rmode, cond);
294 }
295
296 void TurboAssembler::Drop(int count) {
297 if (count > 0) {
298 Add(sp, sp, count * kSystemPointerSize, r0);
299 }
300 }
301
302 void TurboAssembler::Drop(Register count, Register scratch) {
303 ShiftLeftImm(scratch, count, Operand(kSystemPointerSizeLog2));
304 add(sp, sp, scratch);
305 }
306
307 void TurboAssembler::Call(Label* target) { b(target, SetLK); }
308
309 void TurboAssembler::Push(Handle<HeapObject> handle) {
310 mov(r0, Operand(handle));
311 push(r0);
312 }
313
314 void TurboAssembler::Push(Smi smi) {
315 mov(r0, Operand(smi));
316 push(r0);
317 }
318
319 void TurboAssembler::PushArray(Register array, Register size, Register scratch,
320 Register scratch2, PushArrayOrder order) {
321 Label loop, done;
322
323 if (order == kNormal) {
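    // kNormal: copy from the last array element down to the first, so the first
    // element ends up on top of the stack (at the lowest address).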
324 cmpi(size, Operand::Zero());
325 beq(&done);
326 ShiftLeftImm(scratch, size, Operand(kSystemPointerSizeLog2));
327 add(scratch, array, scratch);
328 mtctr(size);
329
330 bind(&loop);
331 LoadPU(scratch2, MemOperand(scratch, -kSystemPointerSize));
332 StorePU(scratch2, MemOperand(sp, -kSystemPointerSize));
333 bdnz(&loop);
334
335 bind(&done);
336 } else {
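    // Reversed order: copy from the first array element to the last, so the last
    // element ends up on top of the stack.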
337 cmpi(size, Operand::Zero());
338 beq(&done);
339
340 mtctr(size);
341 subi(scratch, array, Operand(kSystemPointerSize));
342
343 bind(&loop);
344 LoadPU(scratch2, MemOperand(scratch, kSystemPointerSize));
345 StorePU(scratch2, MemOperand(sp, -kSystemPointerSize));
346 bdnz(&loop);
347 bind(&done);
348 }
349 }
350
351 void TurboAssembler::Move(Register dst, Handle<HeapObject> value,
352 RelocInfo::Mode rmode) {
353 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
354 // non-isolate-independent code. In many cases it might be cheaper than
355 // embedding the relocatable value.
356 if (root_array_available_ && options().isolate_independent_code) {
357 IndirectLoadConstant(dst, value);
358 return;
359 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
360 EmbeddedObjectIndex index = AddEmbeddedObject(value);
361 DCHECK(is_uint32(index));
362 mov(dst, Operand(static_cast<int>(index), rmode));
363 } else {
364 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
365 mov(dst, Operand(value.address(), rmode));
366 }
367 }
368
369 void TurboAssembler::Move(Register dst, ExternalReference reference) {
370 // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
371 // non-isolate-independent code. In many cases it might be cheaper than
372 // embedding the relocatable value.
373 if (root_array_available_ && options().isolate_independent_code) {
374 IndirectLoadExternalReference(dst, reference);
375 return;
376 }
377 mov(dst, Operand(reference));
378 }
379
380 void TurboAssembler::Move(Register dst, Register src, Condition cond) {
381 DCHECK(cond == al);
382 if (dst != src) {
383 mr(dst, src);
384 }
385 }
386
387 void TurboAssembler::Move(DoubleRegister dst, DoubleRegister src) {
388 if (dst != src) {
389 fmr(dst, src);
390 }
391 }
392
393 void TurboAssembler::MultiPush(RegList regs, Register location) {
394 int16_t num_to_push = base::bits::CountPopulation(regs);
395 int16_t stack_offset = num_to_push * kSystemPointerSize;
396
397 subi(location, location, Operand(stack_offset));
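  // Store from the highest register code down to the lowest so that the
  // lowest-numbered register ends up at the lowest stack address.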
398 for (int16_t i = Register::kNumRegisters - 1; i >= 0; i--) {
399 if ((regs & (1 << i)) != 0) {
400 stack_offset -= kSystemPointerSize;
401 StoreP(ToRegister(i), MemOperand(location, stack_offset));
402 }
403 }
404 }
405
406 void TurboAssembler::MultiPop(RegList regs, Register location) {
407 int16_t stack_offset = 0;
408
409 for (int16_t i = 0; i < Register::kNumRegisters; i++) {
410 if ((regs & (1 << i)) != 0) {
411 LoadP(ToRegister(i), MemOperand(location, stack_offset));
412 stack_offset += kSystemPointerSize;
413 }
414 }
415 addi(location, location, Operand(stack_offset));
416 }
417
418 void TurboAssembler::MultiPushDoubles(RegList dregs, Register location) {
419 int16_t num_to_push = base::bits::CountPopulation(dregs);
420 int16_t stack_offset = num_to_push * kDoubleSize;
421
422 subi(location, location, Operand(stack_offset));
423 for (int16_t i = DoubleRegister::kNumRegisters - 1; i >= 0; i--) {
424 if ((dregs & (1 << i)) != 0) {
425 DoubleRegister dreg = DoubleRegister::from_code(i);
426 stack_offset -= kDoubleSize;
427 stfd(dreg, MemOperand(location, stack_offset));
428 }
429 }
430 }
431
432 void TurboAssembler::MultiPopDoubles(RegList dregs, Register location) {
433 int16_t stack_offset = 0;
434
435 for (int16_t i = 0; i < DoubleRegister::kNumRegisters; i++) {
436 if ((dregs & (1 << i)) != 0) {
437 DoubleRegister dreg = DoubleRegister::from_code(i);
438 lfd(dreg, MemOperand(location, stack_offset));
439 stack_offset += kDoubleSize;
440 }
441 }
442 addi(location, location, Operand(stack_offset));
443 }
444
445 void TurboAssembler::LoadRoot(Register destination, RootIndex index,
446 Condition cond) {
447 DCHECK(cond == al);
448 LoadP(destination,
449 MemOperand(kRootRegister, RootRegisterOffsetForRootIndex(index)), r0);
450 }
451
452 void TurboAssembler::LoadTaggedPointerField(const Register& destination,
453 const MemOperand& field_operand,
454 const Register& scratch) {
455 if (COMPRESS_POINTERS_BOOL) {
456 DecompressTaggedPointer(destination, field_operand);
457 } else {
458 LoadP(destination, field_operand, scratch);
459 }
460 }
461
462 void TurboAssembler::LoadAnyTaggedField(const Register& destination,
463 const MemOperand& field_operand,
464 const Register& scratch) {
465 if (COMPRESS_POINTERS_BOOL) {
466 DecompressAnyTagged(destination, field_operand);
467 } else {
468 LoadP(destination, field_operand, scratch);
469 }
470 }
471
472 void TurboAssembler::SmiUntag(Register dst, const MemOperand& src, RCBit rc) {
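  // 31-bit Smis fit in a 32-bit word, so a word load suffices; otherwise the
  // full pointer-sized value is loaded before untagging.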
473 if (SmiValuesAre31Bits()) {
474 lwz(dst, src);
475 } else {
476 LoadP(dst, src);
477 }
478
479 SmiUntag(dst, rc);
480 }
481
482 void TurboAssembler::SmiUntagField(Register dst, const MemOperand& src,
483 RCBit rc) {
484 SmiUntag(dst, src, rc);
485 }
486
487 void TurboAssembler::StoreTaggedFieldX(const Register& value,
488 const MemOperand& dst_field_operand,
489 const Register& scratch) {
490 if (COMPRESS_POINTERS_BOOL) {
491 RecordComment("[ StoreTagged");
492 stwx(value, dst_field_operand);
493 RecordComment("]");
494 } else {
495 StorePX(value, dst_field_operand);
496 }
497 }
498
499 void TurboAssembler::StoreTaggedField(const Register& value,
500 const MemOperand& dst_field_operand,
501 const Register& scratch) {
502 if (COMPRESS_POINTERS_BOOL) {
503 RecordComment("[ StoreTagged");
504 StoreWord(value, dst_field_operand, scratch);
505 RecordComment("]");
506 } else {
507 StoreP(value, dst_field_operand, scratch);
508 }
509 }
510
511 void TurboAssembler::DecompressTaggedSigned(Register destination,
512 Register src) {
513 RecordComment("[ DecompressTaggedSigned");
514 ZeroExtWord32(destination, src);
515 RecordComment("]");
516 }
517
518 void TurboAssembler::DecompressTaggedSigned(Register destination,
519 MemOperand field_operand) {
520 RecordComment("[ DecompressTaggedSigned");
521 LoadWord(destination, field_operand, r0);
522 RecordComment("]");
523 }
524
525 void TurboAssembler::DecompressTaggedPointer(Register destination,
526 Register source) {
527 RecordComment("[ DecompressTaggedPointer");
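  // Zero-extend the 32-bit compressed value and add the root register, which
  // serves as the decompression base.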
528 ZeroExtWord32(destination, source);
529 add(destination, destination, kRootRegister);
530 RecordComment("]");
531 }
532
533 void TurboAssembler::DecompressTaggedPointer(Register destination,
534 MemOperand field_operand) {
535 RecordComment("[ DecompressTaggedPointer");
536 LoadWord(destination, field_operand, r0);
537 add(destination, destination, kRootRegister);
538 RecordComment("]");
539 }
540
541 void TurboAssembler::DecompressAnyTagged(Register destination,
542 MemOperand field_operand) {
543 RecordComment("[ DecompressAnyTagged");
544 LoadWord(destination, field_operand, r0);
545 add(destination, destination, kRootRegister);
546 RecordComment("]");
547 }
548
549 void TurboAssembler::DecompressAnyTagged(Register destination,
550 Register source) {
551 RecordComment("[ DecompressAnyTagged");
552 ZeroExtWord32(destination, source);
553 add(destination, destination, kRootRegister);
554 RecordComment("]");
555 }
556
557 void MacroAssembler::RecordWriteField(Register object, int offset,
558 Register value, Register dst,
559 LinkRegisterStatus lr_status,
560 SaveFPRegsMode save_fp,
561 RememberedSetAction remembered_set_action,
562 SmiCheck smi_check) {
563 // First, check if a write barrier is even needed. The tests below
564 // catch stores of Smis.
565 Label done;
566
567 // Skip barrier if writing a smi.
568 if (smi_check == INLINE_SMI_CHECK) {
569 JumpIfSmi(value, &done);
570 }
571
572   // Although the object register is tagged, the offset is relative to the start
573   // of the object, so the offset must be a multiple of kTaggedSize.
574 DCHECK(IsAligned(offset, kTaggedSize));
575
576 Add(dst, object, offset - kHeapObjectTag, r0);
577 if (emit_debug_code()) {
578 Label ok;
579 andi(r0, dst, Operand(kTaggedSize - 1));
580 beq(&ok, cr0);
581 stop();
582 bind(&ok);
583 }
584
585 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
586 OMIT_SMI_CHECK);
587
588 bind(&done);
589
590   // Clobber the input registers when running with the debug-code flag
591   // turned on, to provoke errors at call sites that rely on them.
592 if (emit_debug_code()) {
593 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
594 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
595 }
596 }
597
598 void TurboAssembler::SaveRegisters(RegList registers) {
599 DCHECK_GT(NumRegs(registers), 0);
600 RegList regs = 0;
601 for (int i = 0; i < Register::kNumRegisters; ++i) {
602 if ((registers >> i) & 1u) {
603 regs |= Register::from_code(i).bit();
604 }
605 }
606
607 MultiPush(regs);
608 }
609
610 void TurboAssembler::RestoreRegisters(RegList registers) {
611 DCHECK_GT(NumRegs(registers), 0);
612 RegList regs = 0;
613 for (int i = 0; i < Register::kNumRegisters; ++i) {
614 if ((registers >> i) & 1u) {
615 regs |= Register::from_code(i).bit();
616 }
617 }
618 MultiPop(regs);
619 }
620
621 void TurboAssembler::CallEphemeronKeyBarrier(Register object, Register address,
622 SaveFPRegsMode fp_mode) {
623 EphemeronKeyBarrierDescriptor descriptor;
624 RegList registers = descriptor.allocatable_registers();
625
626 SaveRegisters(registers);
627
628 Register object_parameter(
629 descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kObject));
630 Register slot_parameter(descriptor.GetRegisterParameter(
631 EphemeronKeyBarrierDescriptor::kSlotAddress));
632 Register fp_mode_parameter(
633 descriptor.GetRegisterParameter(EphemeronKeyBarrierDescriptor::kFPMode));
634
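  // Move the arguments into the parameter registers via the stack; this is
  // correct even if the source and destination registers overlap.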
635 push(object);
636 push(address);
637
638 pop(slot_parameter);
639 pop(object_parameter);
640
641 Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
642 Call(isolate()->builtins()->builtin_handle(Builtins::kEphemeronKeyBarrier),
643 RelocInfo::CODE_TARGET);
644 RestoreRegisters(registers);
645 }
646
647 void TurboAssembler::CallRecordWriteStub(
648 Register object, Register address,
649 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode) {
650 CallRecordWriteStub(
651 object, address, remembered_set_action, fp_mode,
652 isolate()->builtins()->builtin_handle(Builtins::kRecordWrite),
653 kNullAddress);
654 }
655
656 void TurboAssembler::CallRecordWriteStub(
657 Register object, Register address,
658 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
659 Address wasm_target) {
660 CallRecordWriteStub(object, address, remembered_set_action, fp_mode,
661 Handle<Code>::null(), wasm_target);
662 }
663
664 void TurboAssembler::CallRecordWriteStub(
665 Register object, Register address,
666 RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
667 Handle<Code> code_target, Address wasm_target) {
668 DCHECK_NE(code_target.is_null(), wasm_target == kNullAddress);
669 // TODO(albertnetymk): For now we ignore remembered_set_action and fp_mode,
670 // i.e. always emit remember set and save FP registers in RecordWriteStub. If
671 // large performance regression is observed, we should use these values to
672 // avoid unnecessary work.
673
674 RecordWriteDescriptor descriptor;
675 RegList registers = descriptor.allocatable_registers();
676
677 SaveRegisters(registers);
678
679 Register object_parameter(
680 descriptor.GetRegisterParameter(RecordWriteDescriptor::kObject));
681 Register slot_parameter(
682 descriptor.GetRegisterParameter(RecordWriteDescriptor::kSlot));
683 Register remembered_set_parameter(
684 descriptor.GetRegisterParameter(RecordWriteDescriptor::kRememberedSet));
685 Register fp_mode_parameter(
686 descriptor.GetRegisterParameter(RecordWriteDescriptor::kFPMode));
687
688 push(object);
689 push(address);
690
691 pop(slot_parameter);
692 pop(object_parameter);
693
694 Move(remembered_set_parameter, Smi::FromEnum(remembered_set_action));
695 Move(fp_mode_parameter, Smi::FromEnum(fp_mode));
696 if (code_target.is_null()) {
697 Call(wasm_target, RelocInfo::WASM_STUB_CALL);
698 } else {
699 Call(code_target, RelocInfo::CODE_TARGET);
700 }
701
702 RestoreRegisters(registers);
703 }
704
705 // Will clobber 4 registers: object, address, scratch, ip. The
706 // register 'object' contains a heap object pointer. The heap object
707 // tag is shifted away.
708 void MacroAssembler::RecordWrite(Register object, Register address,
709 Register value, LinkRegisterStatus lr_status,
710 SaveFPRegsMode fp_mode,
711 RememberedSetAction remembered_set_action,
712 SmiCheck smi_check) {
713 DCHECK(object != value);
714 if (emit_debug_code()) {
715 LoadTaggedPointerField(r0, MemOperand(address));
716 cmp(r0, value);
717 Check(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite);
718 }
719
720 if ((remembered_set_action == OMIT_REMEMBERED_SET &&
721 !FLAG_incremental_marking) ||
722 FLAG_disable_write_barriers) {
723 return;
724 }
725
726 // First, check if a write barrier is even needed. The tests below
727 // catch stores of smis and stores into the young generation.
728 Label done;
729
730 if (smi_check == INLINE_SMI_CHECK) {
731 JumpIfSmi(value, &done);
732 }
733
734 CheckPageFlag(value,
735 value, // Used as scratch.
736 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
737 CheckPageFlag(object,
738 value, // Used as scratch.
739 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
740
741 // Record the actual write.
742 if (lr_status == kLRHasNotBeenSaved) {
743 mflr(r0);
744 push(r0);
745 }
746 CallRecordWriteStub(object, address, remembered_set_action, fp_mode);
747 if (lr_status == kLRHasNotBeenSaved) {
748 pop(r0);
749 mtlr(r0);
750 }
751
752 bind(&done);
753
754   // Clobber the clobbered registers when running with the debug-code flag
755   // turned on, to provoke errors.
756 if (emit_debug_code()) {
757 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
758 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
759 }
760 }
761
762 void TurboAssembler::PushCommonFrame(Register marker_reg) {
763 int fp_delta = 0;
764 mflr(r0);
765 if (FLAG_enable_embedded_constant_pool) {
766 if (marker_reg.is_valid()) {
767 Push(r0, fp, kConstantPoolRegister, marker_reg);
768 fp_delta = 2;
769 } else {
770 Push(r0, fp, kConstantPoolRegister);
771 fp_delta = 1;
772 }
773 } else {
774 if (marker_reg.is_valid()) {
775 Push(r0, fp, marker_reg);
776 fp_delta = 1;
777 } else {
778 Push(r0, fp);
779 fp_delta = 0;
780 }
781 }
782 addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
783 }
784
785 void TurboAssembler::PushStandardFrame(Register function_reg) {
786 int fp_delta = 0;
787 mflr(r0);
788 if (FLAG_enable_embedded_constant_pool) {
789 if (function_reg.is_valid()) {
790 Push(r0, fp, kConstantPoolRegister, cp, function_reg);
791 fp_delta = 3;
792 } else {
793 Push(r0, fp, kConstantPoolRegister, cp);
794 fp_delta = 2;
795 }
796 } else {
797 if (function_reg.is_valid()) {
798 Push(r0, fp, cp, function_reg);
799 fp_delta = 2;
800 } else {
801 Push(r0, fp, cp);
802 fp_delta = 1;
803 }
804 }
805 addi(fp, sp, Operand(fp_delta * kSystemPointerSize));
806 Push(kJavaScriptCallArgCountRegister);
807 }
808
809 void TurboAssembler::RestoreFrameStateForTailCall() {
810 if (FLAG_enable_embedded_constant_pool) {
811 LoadP(kConstantPoolRegister,
812 MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
813 set_constant_pool_available(false);
814 }
815 LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
816 LoadP(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
817 mtlr(r0);
818 }
819
820 void TurboAssembler::CanonicalizeNaN(const DoubleRegister dst,
821 const DoubleRegister src) {
822 // Turn potential sNaN into qNaN.
823 fsub(dst, src, kDoubleRegZero);
824 }
825
826 void TurboAssembler::ConvertIntToDouble(Register src, DoubleRegister dst) {
827 MovIntToDouble(dst, src, r0);
828 fcfid(dst, dst);
829 }
830
831 void TurboAssembler::ConvertUnsignedIntToDouble(Register src,
832 DoubleRegister dst) {
833 MovUnsignedIntToDouble(dst, src, r0);
834 fcfid(dst, dst);
835 }
836
837 void TurboAssembler::ConvertIntToFloat(Register src, DoubleRegister dst) {
838 MovIntToDouble(dst, src, r0);
839 fcfids(dst, dst);
840 }
841
842 void TurboAssembler::ConvertUnsignedIntToFloat(Register src,
843 DoubleRegister dst) {
844 MovUnsignedIntToDouble(dst, src, r0);
845 fcfids(dst, dst);
846 }
847
848 #if V8_TARGET_ARCH_PPC64
849 void TurboAssembler::ConvertInt64ToDouble(Register src,
850 DoubleRegister double_dst) {
851 MovInt64ToDouble(double_dst, src);
852 fcfid(double_dst, double_dst);
853 }
854
855 void TurboAssembler::ConvertUnsignedInt64ToFloat(Register src,
856 DoubleRegister double_dst) {
857 MovInt64ToDouble(double_dst, src);
858 fcfidus(double_dst, double_dst);
859 }
860
861 void TurboAssembler::ConvertUnsignedInt64ToDouble(Register src,
862 DoubleRegister double_dst) {
863 MovInt64ToDouble(double_dst, src);
864 fcfidu(double_dst, double_dst);
865 }
866
867 void TurboAssembler::ConvertInt64ToFloat(Register src,
868 DoubleRegister double_dst) {
869 MovInt64ToDouble(double_dst, src);
870 fcfids(double_dst, double_dst);
871 }
872 #endif
873
874 void TurboAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
875 #if !V8_TARGET_ARCH_PPC64
876 const Register dst_hi,
877 #endif
878 const Register dst,
879 const DoubleRegister double_dst,
880 FPRoundingMode rounding_mode) {
881 if (rounding_mode == kRoundToZero) {
882 fctidz(double_dst, double_input);
883 } else {
884 SetRoundingMode(rounding_mode);
885 fctid(double_dst, double_input);
886 ResetRoundingMode();
887 }
888
889 MovDoubleToInt64(
890 #if !V8_TARGET_ARCH_PPC64
891 dst_hi,
892 #endif
893 dst, double_dst);
894 }
895
896 #if V8_TARGET_ARCH_PPC64
897 void TurboAssembler::ConvertDoubleToUnsignedInt64(
898 const DoubleRegister double_input, const Register dst,
899 const DoubleRegister double_dst, FPRoundingMode rounding_mode) {
900 if (rounding_mode == kRoundToZero) {
901 fctiduz(double_dst, double_input);
902 } else {
903 SetRoundingMode(rounding_mode);
904 fctidu(double_dst, double_input);
905 ResetRoundingMode();
906 }
907
908 MovDoubleToInt64(dst, double_dst);
909 }
910 #endif
911
912 #if !V8_TARGET_ARCH_PPC64
913 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
914 Register src_low, Register src_high,
915 Register scratch, Register shift) {
916 DCHECK(!AreAliased(dst_low, src_high));
917 DCHECK(!AreAliased(dst_high, src_low));
918 DCHECK(!AreAliased(dst_low, dst_high, shift));
919 Label less_than_32;
920 Label done;
921 cmpi(shift, Operand(32));
922 blt(&less_than_32);
923 // If shift >= 32
924 andi(scratch, shift, Operand(0x1F));
925 slw(dst_high, src_low, scratch);
926 li(dst_low, Operand::Zero());
927 b(&done);
928 bind(&less_than_32);
929 // If shift < 32
930 subfic(scratch, shift, Operand(32));
931 slw(dst_high, src_high, shift);
932 srw(scratch, src_low, scratch);
933 orx(dst_high, dst_high, scratch);
934 slw(dst_low, src_low, shift);
935 bind(&done);
936 }
937
938 void TurboAssembler::ShiftLeftPair(Register dst_low, Register dst_high,
939 Register src_low, Register src_high,
940 uint32_t shift) {
941 DCHECK(!AreAliased(dst_low, src_high));
942 DCHECK(!AreAliased(dst_high, src_low));
943 if (shift == 32) {
944 Move(dst_high, src_low);
945 li(dst_low, Operand::Zero());
946 } else if (shift > 32) {
947 shift &= 0x1F;
948 slwi(dst_high, src_low, Operand(shift));
949 li(dst_low, Operand::Zero());
950 } else if (shift == 0) {
951 Move(dst_low, src_low);
952 Move(dst_high, src_high);
953 } else {
954 slwi(dst_high, src_high, Operand(shift));
955 rlwimi(dst_high, src_low, shift, 32 - shift, 31);
956 slwi(dst_low, src_low, Operand(shift));
957 }
958 }
959
960 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
961 Register src_low, Register src_high,
962 Register scratch, Register shift) {
963 DCHECK(!AreAliased(dst_low, src_high));
964 DCHECK(!AreAliased(dst_high, src_low));
965 DCHECK(!AreAliased(dst_low, dst_high, shift));
966 Label less_than_32;
967 Label done;
968 cmpi(shift, Operand(32));
969 blt(&less_than_32);
970 // If shift >= 32
971 andi(scratch, shift, Operand(0x1F));
972 srw(dst_low, src_high, scratch);
973 li(dst_high, Operand::Zero());
974 b(&done);
975 bind(&less_than_32);
976 // If shift < 32
977 subfic(scratch, shift, Operand(32));
978 srw(dst_low, src_low, shift);
979 slw(scratch, src_high, scratch);
980 orx(dst_low, dst_low, scratch);
981 srw(dst_high, src_high, shift);
982 bind(&done);
983 }
984
985 void TurboAssembler::ShiftRightPair(Register dst_low, Register dst_high,
986 Register src_low, Register src_high,
987 uint32_t shift) {
988 DCHECK(!AreAliased(dst_low, src_high));
989 DCHECK(!AreAliased(dst_high, src_low));
990 if (shift == 32) {
991 Move(dst_low, src_high);
992 li(dst_high, Operand::Zero());
993 } else if (shift > 32) {
994 shift &= 0x1F;
995 srwi(dst_low, src_high, Operand(shift));
996 li(dst_high, Operand::Zero());
997 } else if (shift == 0) {
998 Move(dst_low, src_low);
999 Move(dst_high, src_high);
1000 } else {
1001 srwi(dst_low, src_low, Operand(shift));
1002 rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
1003 srwi(dst_high, src_high, Operand(shift));
1004 }
1005 }
1006
1007 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
1008 Register src_low, Register src_high,
1009 Register scratch, Register shift) {
1010 DCHECK(!AreAliased(dst_low, src_high, shift));
1011 DCHECK(!AreAliased(dst_high, src_low, shift));
1012 Label less_than_32;
1013 Label done;
1014 cmpi(shift, Operand(32));
1015 blt(&less_than_32);
1016 // If shift >= 32
1017 andi(scratch, shift, Operand(0x1F));
1018 sraw(dst_low, src_high, scratch);
1019 srawi(dst_high, src_high, 31);
1020 b(&done);
1021 bind(&less_than_32);
1022 // If shift < 32
1023 subfic(scratch, shift, Operand(32));
1024 srw(dst_low, src_low, shift);
1025 slw(scratch, src_high, scratch);
1026 orx(dst_low, dst_low, scratch);
1027 sraw(dst_high, src_high, shift);
1028 bind(&done);
1029 }
1030
1031 void TurboAssembler::ShiftRightAlgPair(Register dst_low, Register dst_high,
1032 Register src_low, Register src_high,
1033 uint32_t shift) {
1034 DCHECK(!AreAliased(dst_low, src_high));
1035 DCHECK(!AreAliased(dst_high, src_low));
1036 if (shift == 32) {
1037 Move(dst_low, src_high);
1038 srawi(dst_high, src_high, 31);
1039 } else if (shift > 32) {
1040 shift &= 0x1F;
1041 srawi(dst_low, src_high, shift);
1042 srawi(dst_high, src_high, 31);
1043 } else if (shift == 0) {
1044 Move(dst_low, src_low);
1045 Move(dst_high, src_high);
1046 } else {
1047 srwi(dst_low, src_low, Operand(shift));
1048 rlwimi(dst_low, src_high, 32 - shift, 0, shift - 1);
1049 srawi(dst_high, src_high, shift);
1050 }
1051 }
1052 #endif
1053
1054 void TurboAssembler::LoadConstantPoolPointerRegisterFromCodeTargetAddress(
1055 Register code_target_address) {
1056 // Builtins do not use the constant pool (see is_constant_pool_available).
1057 STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
1058
1059 lwz(r0, MemOperand(code_target_address,
1060 Code::kInstructionSizeOffset - Code::kHeaderSize));
1061 lwz(kConstantPoolRegister,
1062 MemOperand(code_target_address,
1063 Code::kConstantPoolOffsetOffset - Code::kHeaderSize));
1064 add(kConstantPoolRegister, kConstantPoolRegister, code_target_address);
1065 add(kConstantPoolRegister, kConstantPoolRegister, r0);
1066 }
1067
1068 void TurboAssembler::LoadPC(Register dst) {
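  // Branch to the next instruction with the LK bit set; the link register then
  // holds that instruction's address (the current PC), which mflr copies to dst.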
1069 b(4, SetLK);
1070 mflr(dst);
1071 }
1072
1073 void TurboAssembler::ComputeCodeStartAddress(Register dst) {
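  // Preserve the link register, read the current PC, then subtract that
  // instruction's offset within the generated code to get the code start.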
1074 mflr(r0);
1075 LoadPC(dst);
1076 subi(dst, dst, Operand(pc_offset() - kInstrSize));
1077 mtlr(r0);
1078 }
1079
1080 void TurboAssembler::LoadConstantPoolPointerRegister() {
1081 //
1082 // Builtins do not use the constant pool (see is_constant_pool_available).
1083 STATIC_ASSERT(Code::kOnHeapBodyIsContiguous);
1084
1085 LoadPC(kConstantPoolRegister);
1086 int32_t delta = -pc_offset() + 4;
1087 add_label_offset(kConstantPoolRegister, kConstantPoolRegister,
1088 ConstantPoolPosition(), delta);
1089 }
1090
1091 void TurboAssembler::StubPrologue(StackFrame::Type type) {
1092 {
1093 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1094 mov(r11, Operand(StackFrame::TypeToMarker(type)));
1095 PushCommonFrame(r11);
1096 }
1097 if (FLAG_enable_embedded_constant_pool) {
1098 LoadConstantPoolPointerRegister();
1099 set_constant_pool_available(true);
1100 }
1101 }
1102
1103 void TurboAssembler::Prologue() {
1104 PushStandardFrame(r4);
1105 if (FLAG_enable_embedded_constant_pool) {
1106 // base contains prologue address
1107 LoadConstantPoolPointerRegister();
1108 set_constant_pool_available(true);
1109 }
1110 }
1111
1112 void TurboAssembler::EnterFrame(StackFrame::Type type,
1113 bool load_constant_pool_pointer_reg) {
1114 if (FLAG_enable_embedded_constant_pool && load_constant_pool_pointer_reg) {
1115 // Push type explicitly so we can leverage the constant pool.
1116 // This path cannot rely on ip containing code entry.
1117 PushCommonFrame();
1118 LoadConstantPoolPointerRegister();
1119 mov(ip, Operand(StackFrame::TypeToMarker(type)));
1120 push(ip);
1121 } else {
1122 mov(ip, Operand(StackFrame::TypeToMarker(type)));
1123 PushCommonFrame(ip);
1124 }
1125 }
1126
1127 int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
1128 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1129 // r3: preserved
1130 // r4: preserved
1131 // r5: preserved
1132
1133 // Drop the execution stack down to the frame pointer and restore
1134 // the caller's state.
1135 int frame_ends;
1136 LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
1137 LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
1138 if (FLAG_enable_embedded_constant_pool) {
1139 LoadP(kConstantPoolRegister,
1140 MemOperand(fp, StandardFrameConstants::kConstantPoolOffset));
1141 }
1142 mtlr(r0);
1143 frame_ends = pc_offset();
1144 Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
1145 mr(fp, ip);
1146 return frame_ends;
1147 }
1148
1149 // ExitFrame layout (possibly outdated; needs updating)
1150 //
1151 // SP -> previousSP
1152 // LK reserved
1153 // sp_on_exit (for debug?)
1154 // oldSP->prev SP
1155 // LK
1156 // <parameters on stack>
1157
1158 // Prior to calling EnterExitFrame, we have a number of parameters
1159 // on the stack that we need to wrap a real frame around, so first
1160 // we reserve a slot for LK and push the previous SP, which is captured
1161 // in the fp register (r31).
1162 // Then we allocate a new frame.
1163
1164 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
1165 StackFrame::Type frame_type) {
1166 DCHECK(frame_type == StackFrame::EXIT ||
1167 frame_type == StackFrame::BUILTIN_EXIT);
1168 // Set up the frame structure on the stack.
1169 DCHECK_EQ(2 * kSystemPointerSize, ExitFrameConstants::kCallerSPDisplacement);
1170 DCHECK_EQ(1 * kSystemPointerSize, ExitFrameConstants::kCallerPCOffset);
1171 DCHECK_EQ(0 * kSystemPointerSize, ExitFrameConstants::kCallerFPOffset);
1172 DCHECK_GT(stack_space, 0);
1173
1174 // This is an opportunity to build a frame to wrap
1175 // all of the pushes that have happened inside of V8
1176 // since we were called from C code
1177
1178 mov(ip, Operand(StackFrame::TypeToMarker(frame_type)));
1179 PushCommonFrame(ip);
1180 // Reserve room for saved entry sp.
1181 subi(sp, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp));
1182
1183 if (emit_debug_code()) {
1184 li(r8, Operand::Zero());
1185 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1186 }
1187 if (FLAG_enable_embedded_constant_pool) {
1188 StoreP(kConstantPoolRegister,
1189 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
1190 }
1191
1192 // Save the frame pointer and the context in top.
1193 Move(r8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1194 isolate()));
1195 StoreP(fp, MemOperand(r8));
1196 Move(r8,
1197 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1198 StoreP(cp, MemOperand(r8));
1199
1200 // Optionally save all volatile double registers.
1201 if (save_doubles) {
1202 MultiPushDoubles(kCallerSavedDoubles);
1203 // Note that d0 will be accessible at
1204 // fp - ExitFrameConstants::kFrameSize -
1205 // kNumCallerSavedDoubles * kDoubleSize,
1206 // since the sp slot and code slot were pushed after the fp.
1207 }
1208
1209 addi(sp, sp, Operand(-stack_space * kSystemPointerSize));
1210
1211 // Allocate and align the frame preparing for calling the runtime
1212 // function.
1213 const int frame_alignment = ActivationFrameAlignment();
1214 if (frame_alignment > kSystemPointerSize) {
1215 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1216 ClearRightImm(sp, sp,
1217 Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
1218 }
1219 li(r0, Operand::Zero());
1220 StorePU(r0,
1221 MemOperand(sp, -kNumRequiredStackFrameSlots * kSystemPointerSize));
1222
1223 // Set the exit frame sp value to point just before the return address
1224 // location.
1225 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kSystemPointerSize));
1226 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
1227 }
1228
1229 int TurboAssembler::ActivationFrameAlignment() {
1230 #if !defined(USE_SIMULATOR)
1231 // Running on the real platform. Use the alignment as mandated by the local
1232 // environment.
1233 // Note: This will break if we ever start generating snapshots on one PPC
1234 // platform for another PPC platform with a different alignment.
1235 return base::OS::ActivationFrameAlignment();
1236 #else // Simulated
1237 // If we are using the simulator then we should always align to the expected
1238 // alignment. As the simulator is used to generate snapshots we do not know
1239 // if the target platform will need alignment, so this is controlled from a
1240 // flag.
1241 return FLAG_sim_stack_alignment;
1242 #endif
1243 }
1244
1245 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
1246 bool argument_count_is_length) {
1247 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1248 // Optionally restore all double registers.
1249 if (save_doubles) {
1250 // Calculate the stack location of the saved doubles and restore them.
1251 const int kNumRegs = kNumCallerSavedDoubles;
1252 const int offset =
1253 (ExitFrameConstants::kFixedFrameSizeFromFp + kNumRegs * kDoubleSize);
1254 addi(r6, fp, Operand(-offset));
1255 MultiPopDoubles(kCallerSavedDoubles, r6);
1256 }
1257
1258 // Clear top frame.
1259 li(r6, Operand::Zero());
1260 Move(ip, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
1261 isolate()));
1262 StoreP(r6, MemOperand(ip));
1263
1264 // Restore current context from top and clear it in debug mode.
1265 Move(ip,
1266 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1267 LoadP(cp, MemOperand(ip));
1268
1269 #ifdef DEBUG
1270 mov(r6, Operand(Context::kInvalidContext));
1271 Move(ip,
1272 ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
1273 StoreP(r6, MemOperand(ip));
1274 #endif
1275
1276 // Tear down the exit frame, pop the arguments, and return.
1277 LeaveFrame(StackFrame::EXIT);
1278
1279 if (argument_count.is_valid()) {
1280 if (!argument_count_is_length) {
1281 ShiftLeftImm(argument_count, argument_count,
1282 Operand(kSystemPointerSizeLog2));
1283 }
1284 add(sp, sp, argument_count);
1285 }
1286 }
1287
1288 void TurboAssembler::MovFromFloatResult(const DoubleRegister dst) {
1289 Move(dst, d1);
1290 }
1291
1292 void TurboAssembler::MovFromFloatParameter(const DoubleRegister dst) {
1293 Move(dst, d1);
1294 }
1295
1296 void TurboAssembler::PrepareForTailCall(Register callee_args_count,
1297 Register caller_args_count,
1298 Register scratch0, Register scratch1) {
1299 DCHECK(!AreAliased(callee_args_count, caller_args_count, scratch0, scratch1));
1300
1301 // Calculate the end of destination area where we will put the arguments
1302 // after we drop current frame. We add kSystemPointerSize to count the
1303 // receiver argument which is not included into formal parameters count.
1304 Register dst_reg = scratch0;
1305 ShiftLeftImm(dst_reg, caller_args_count, Operand(kSystemPointerSizeLog2));
1306 add(dst_reg, fp, dst_reg);
1307 addi(dst_reg, dst_reg,
1308 Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
1309
1310 Register src_reg = caller_args_count;
1311 // Calculate the end of source area. +kSystemPointerSize is for the receiver.
1312 ShiftLeftImm(src_reg, callee_args_count, Operand(kSystemPointerSizeLog2));
1313 add(src_reg, sp, src_reg);
1314 addi(src_reg, src_reg, Operand(kSystemPointerSize));
1315
1316 if (FLAG_debug_code) {
1317 cmpl(src_reg, dst_reg);
1318 Check(lt, AbortReason::kStackAccessBelowStackPointer);
1319 }
1320
1321 // Restore caller's frame pointer and return address now as they will be
1322 // overwritten by the copying loop.
1323 RestoreFrameStateForTailCall();
1324
1325 // Now copy callee arguments to the caller frame going backwards to avoid
1326 // callee arguments corruption (source and destination areas could overlap).
1327
1328 // Both src_reg and dst_reg are pointing to the word after the one to copy,
1329 // so they must be pre-decremented in the loop.
1330 Register tmp_reg = scratch1;
1331 Label loop;
1332 addi(tmp_reg, callee_args_count, Operand(1)); // +1 for receiver
1333 mtctr(tmp_reg);
1334 bind(&loop);
1335 LoadPU(tmp_reg, MemOperand(src_reg, -kSystemPointerSize));
1336 StorePU(tmp_reg, MemOperand(dst_reg, -kSystemPointerSize));
1337 bdnz(&loop);
1338
1339 // Leave current frame.
1340 mr(sp, dst_reg);
1341 }
1342
1343 void MacroAssembler::InvokePrologue(Register expected_parameter_count,
1344 Register actual_parameter_count,
1345 Label* done, InvokeFlag flag) {
1346 Label regular_invoke;
1347
1348 // Check whether the expected and actual arguments count match. If not,
1349 // setup registers according to contract with ArgumentsAdaptorTrampoline:
1350 // r3: actual arguments count
1351 // r4: function (passed through to callee)
1352 // r5: expected arguments count
1353
1354   // The code below is made a lot easier because the calling code already sets
1355   // up actual and expected registers according to the contract if values are
1356   // passed in registers.
1357 
1360   // ARM has some checks as per below; consider adding them for PPC:
1361   // DCHECK_EQ(actual_parameter_count, r3);
1362   // DCHECK_EQ(expected_parameter_count, r5);
1363
1364 cmp(expected_parameter_count, actual_parameter_count);
1365 beq(®ular_invoke);
1366
1367 Handle<Code> adaptor = BUILTIN_CODE(isolate(), ArgumentsAdaptorTrampoline);
1368 if (flag == CALL_FUNCTION) {
1369 Call(adaptor);
1370 b(done);
1371 } else {
1372 Jump(adaptor, RelocInfo::CODE_TARGET);
1373 }
1374 bind(®ular_invoke);
1375 }
1376
1377 void MacroAssembler::CheckDebugHook(Register fun, Register new_target,
1378 Register expected_parameter_count,
1379 Register actual_parameter_count) {
1380 Label skip_hook;
1381
1382 ExternalReference debug_hook_active =
1383 ExternalReference::debug_hook_on_function_call_address(isolate());
1384 Move(r7, debug_hook_active);
1385 LoadByte(r7, MemOperand(r7), r0);
1386 extsb(r7, r7);
1387 CmpSmiLiteral(r7, Smi::zero(), r0);
1388 beq(&skip_hook);
1389
1390 {
1391 // Load receiver to pass it later to DebugOnFunctionCall hook.
1392 LoadReceiver(r7, actual_parameter_count);
1393 FrameScope frame(this,
1394 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
1395
1396 SmiTag(expected_parameter_count);
1397 Push(expected_parameter_count);
1398
1399 SmiTag(actual_parameter_count);
1400 Push(actual_parameter_count);
1401
1402 if (new_target.is_valid()) {
1403 Push(new_target);
1404 }
1405 Push(fun, fun, r7);
1406 CallRuntime(Runtime::kDebugOnFunctionCall);
1407 Pop(fun);
1408 if (new_target.is_valid()) {
1409 Pop(new_target);
1410 }
1411
1412 Pop(actual_parameter_count);
1413 SmiUntag(actual_parameter_count);
1414
1415 Pop(expected_parameter_count);
1416 SmiUntag(expected_parameter_count);
1417 }
1418 bind(&skip_hook);
1419 }
1420
1421 void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
1422 Register expected_parameter_count,
1423 Register actual_parameter_count,
1424 InvokeFlag flag) {
1425 // You can't call a function without a valid frame.
1426 DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1427 DCHECK_EQ(function, r4);
1428 DCHECK_IMPLIES(new_target.is_valid(), new_target == r6);
1429
1430 // On function call, call into the debugger if necessary.
1431 CheckDebugHook(function, new_target, expected_parameter_count,
1432 actual_parameter_count);
1433
1434 // Clear the new.target register if not given.
1435 if (!new_target.is_valid()) {
1436 LoadRoot(r6, RootIndex::kUndefinedValue);
1437 }
1438
1439 Label done;
1440 InvokePrologue(expected_parameter_count, actual_parameter_count, &done, flag);
1441 // We call indirectly through the code field in the function to
1442 // allow recompilation to take effect without changing any of the
1443 // call sites.
1444 Register code = kJavaScriptCallCodeStartRegister;
1445 LoadTaggedPointerField(code,
1446 FieldMemOperand(function, JSFunction::kCodeOffset));
1447 if (flag == CALL_FUNCTION) {
1448 CallCodeObject(code);
1449 } else {
1450 DCHECK(flag == JUMP_FUNCTION);
1451 JumpCodeObject(code);
1452 }
1453
1454 // Continue here if InvokePrologue does handle the invocation due to
1455 // mismatched parameter counts.
1456 bind(&done);
1457 }
1458
1459 void MacroAssembler::InvokeFunctionWithNewTarget(
1460 Register fun, Register new_target, Register actual_parameter_count,
1461 InvokeFlag flag) {
1462 // You can't call a function without a valid frame.
1463 DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1464
1465 // Contract with called JS functions requires that function is passed in r4.
1466 DCHECK_EQ(fun, r4);
1467
1468 Register expected_reg = r5;
1469 Register temp_reg = r7;
1470
1471 LoadTaggedPointerField(
1472 temp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1473 LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1474 LoadHalfWord(expected_reg,
1475 FieldMemOperand(
1476 temp_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1477
1478 InvokeFunctionCode(fun, new_target, expected_reg, actual_parameter_count,
1479 flag);
1480 }
1481
1482 void MacroAssembler::InvokeFunction(Register function,
1483 Register expected_parameter_count,
1484 Register actual_parameter_count,
1485 InvokeFlag flag) {
1486 // You can't call a function without a valid frame.
1487 DCHECK_IMPLIES(flag == CALL_FUNCTION, has_frame());
1488
1489 // Contract with called JS functions requires that function is passed in r4.
1490 DCHECK_EQ(function, r4);
1491
1492 // Get the function and setup the context.
1493 LoadTaggedPointerField(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1494
1495 InvokeFunctionCode(r4, no_reg, expected_parameter_count,
1496 actual_parameter_count, flag);
1497 }
1498
1499 void MacroAssembler::MaybeDropFrames() {
1500 // Check whether we need to drop frames to restart a function on the stack.
1501 ExternalReference restart_fp =
1502 ExternalReference::debug_restart_fp_address(isolate());
1503 Move(r4, restart_fp);
1504 LoadP(r4, MemOperand(r4));
1505 cmpi(r4, Operand::Zero());
1506 Jump(BUILTIN_CODE(isolate(), FrameDropperTrampoline), RelocInfo::CODE_TARGET,
1507 ne);
1508 }
1509
1510 void MacroAssembler::PushStackHandler() {
1511 // Adjust this code if not the case.
1512 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1513 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kSystemPointerSize);
1514
1515 Push(Smi::zero()); // Padding.
1516
1517 // Link the current handler as the next handler.
1518 // Preserve r4-r8.
1519 Move(r3,
1520 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1521 LoadP(r0, MemOperand(r3));
1522 push(r0);
1523
1524 // Set this new handler as the current one.
1525 StoreP(sp, MemOperand(r3));
1526 }
1527
1528 void MacroAssembler::PopStackHandler() {
1529 STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kSystemPointerSize);
1530 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1531
1532 pop(r4);
1533 Move(ip,
1534 ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
1535 StoreP(r4, MemOperand(ip));
1536
1537 Drop(1); // Drop padding.
1538 }
1539
1540 void MacroAssembler::CompareObjectType(Register object, Register map,
1541 Register type_reg, InstanceType type) {
1542 const Register temp = type_reg == no_reg ? r0 : type_reg;
1543
1544 LoadMap(map, object);
1545 CompareInstanceType(map, temp, type);
1546 }
1547
1548 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1549 InstanceType type) {
1550 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1551 STATIC_ASSERT(LAST_TYPE <= 0xFFFF);
1552 lhz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1553 cmpi(type_reg, Operand(type));
1554 }
1555
1556 void MacroAssembler::CompareRoot(Register obj, RootIndex index) {
1557 DCHECK(obj != r0);
1558 LoadRoot(r0, index);
1559 cmp(obj, r0);
1560 }
1561
1562 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1563 Register right,
1564 Register overflow_dst,
1565 Register scratch) {
1566 DCHECK(dst != overflow_dst);
1567 DCHECK(dst != scratch);
1568 DCHECK(overflow_dst != scratch);
1569 DCHECK(overflow_dst != left);
1570 DCHECK(overflow_dst != right);
1571
1572 bool left_is_right = left == right;
1573 RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1574
1575 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
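// Concretely, the sign bit of overflow_dst ends up set iff
//   (dst ^ left) & (dst ^ right)
// has its sign bit set, i.e. the sum differs in sign from both operands.
// When left == right a single XOR suffices, hence the SetRC shortcut above.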
1576 if (dst == left) {
1577 mr(scratch, left); // Preserve left.
1578 add(dst, left, right); // Left is overwritten.
1579 xor_(overflow_dst, dst, scratch, xorRC); // Original left.
1580 if (!left_is_right) xor_(scratch, dst, right);
1581 } else if (dst == right) {
1582 mr(scratch, right); // Preserve right.
1583 add(dst, left, right); // Right is overwritten.
1584 xor_(overflow_dst, dst, left, xorRC);
1585 if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
1586 } else {
1587 add(dst, left, right);
1588 xor_(overflow_dst, dst, left, xorRC);
1589 if (!left_is_right) xor_(scratch, dst, right);
1590 }
1591 if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1592 }
1593
1594 void TurboAssembler::AddAndCheckForOverflow(Register dst, Register left,
1595 intptr_t right,
1596 Register overflow_dst,
1597 Register scratch) {
1598 Register original_left = left;
1599 DCHECK(dst != overflow_dst);
1600 DCHECK(dst != scratch);
1601 DCHECK(overflow_dst != scratch);
1602 DCHECK(overflow_dst != left);
1603
1604 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
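// With an immediate operand only one XOR is needed: for right >= 0 an
// overflow requires a non-negative left and a negative result (AND with dst);
// for right < 0 it requires a negative left and a non-negative result
// (ANDC with dst).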
1605 if (dst == left) {
1606 // Preserve left.
1607 original_left = overflow_dst;
1608 mr(original_left, left);
1609 }
1610 Add(dst, left, right, scratch);
1611 xor_(overflow_dst, dst, original_left);
1612 if (right >= 0) {
1613 and_(overflow_dst, overflow_dst, dst, SetRC);
1614 } else {
1615 andc(overflow_dst, overflow_dst, dst, SetRC);
1616 }
1617 }
1618
1619 void TurboAssembler::SubAndCheckForOverflow(Register dst, Register left,
1620 Register right,
1621 Register overflow_dst,
1622 Register scratch) {
1623 DCHECK(dst != overflow_dst);
1624 DCHECK(dst != scratch);
1625 DCHECK(overflow_dst != scratch);
1626 DCHECK(overflow_dst != left);
1627 DCHECK(overflow_dst != right);
1628
1629 // C = A-B; C overflows if A/B have diff signs and C has diff sign than A
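// Concretely, the sign bit of overflow_dst ends up set iff
//   (dst ^ left) & (left ^ right)
// has its sign bit set.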
1630 if (dst == left) {
1631 mr(scratch, left); // Preserve left.
1632 sub(dst, left, right); // Left is overwritten.
1633 xor_(overflow_dst, dst, scratch);
1634 xor_(scratch, scratch, right);
1635 and_(overflow_dst, overflow_dst, scratch, SetRC);
1636 } else if (dst == right) {
1637 mr(scratch, right); // Preserve right.
1638 sub(dst, left, right); // Right is overwritten.
1639 xor_(overflow_dst, dst, left);
1640 xor_(scratch, left, scratch);
1641 and_(overflow_dst, overflow_dst, scratch, SetRC);
1642 } else {
1643 sub(dst, left, right);
1644 xor_(overflow_dst, dst, left);
1645 xor_(scratch, left, right);
1646 and_(overflow_dst, scratch, overflow_dst, SetRC);
1647 }
1648 }
1649
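// Branches to on_in_range when lower_limit <= value <= higher_limit (both
// inclusive). The check is a single unsigned comparison:
// (value - lower_limit) <= (higher_limit - lower_limit).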
1650 void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
1651 unsigned higher_limit,
1652 Label* on_in_range) {
1653 Register scratch = r0;
1654 if (lower_limit != 0) {
1655 mov(scratch, Operand(lower_limit));
1656 sub(scratch, value, scratch);
1657 cmpli(scratch, Operand(higher_limit - lower_limit));
1658 } else {
1659 mov(scratch, Operand(higher_limit));
1660 cmpl(value, scratch);
1661 }
1662 ble(on_in_range);
1663 }
1664
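// Converts the double in double_input to an int32 in result. A fast inline
// conversion is tried first; when it cannot represent the value, the input is
// spilled to the stack and the DoubleToI builtin (or the Wasm stub, depending
// on stub_mode) is called to perform the conversion.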
1665 void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
1666 Register result,
1667 DoubleRegister double_input,
1668 StubCallMode stub_mode) {
1669 Label done;
1670
1671 TryInlineTruncateDoubleToI(result, double_input, &done);
1672
1673 // If we fell through, the inline version didn't succeed; call the stub instead.
1674 mflr(r0);
1675 push(r0);
1676 // Put input on stack.
1677 stfdu(double_input, MemOperand(sp, -kDoubleSize));
1678
1679 if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
1680 Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
1681 } else {
1682 Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
1683 }
1684
1685 LoadP(result, MemOperand(sp));
1686 addi(sp, sp, Operand(kDoubleSize));
1687 pop(r0);
1688 mtlr(r0);
1689
1690 bind(&done);
1691 }
1692
1693 void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
1694 DoubleRegister double_input,
1695 Label* done) {
1696 DoubleRegister double_scratch = kScratchDoubleReg;
1697 #if !V8_TARGET_ARCH_PPC64
1698 Register scratch = ip;
1699 #endif
1700
1701 ConvertDoubleToInt64(double_input,
1702 #if !V8_TARGET_ARCH_PPC64
1703 scratch,
1704 #endif
1705 result, double_scratch);
1706
1707 // Test for overflow
1708 #if V8_TARGET_ARCH_PPC64
1709 TestIfInt32(result, r0);
1710 #else
1711 TestIfInt32(scratch, result, r0);
1712 #endif
1713 beq(done);
1714 }
1715
1716 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
1717 SaveFPRegsMode save_doubles) {
1718 // All parameters are on the stack. r3 has the return value after call.
1719
1720 // If the expected number of arguments of the runtime function is
1721 // constant, we check that the actual number of arguments matches the
1722 // expectation.
1723 CHECK(f->nargs < 0 || f->nargs == num_arguments);
1724
1725 // TODO(1236192): Most runtime routines don't need the number of
1726 // arguments passed in because it is constant. At some point we
1727 // should remove this need and make the runtime routine entry code
1728 // smarter.
1729 mov(r3, Operand(num_arguments));
1730 Move(r4, ExternalReference::Create(f));
1731 #if V8_TARGET_ARCH_PPC64
1732 Handle<Code> code =
1733 CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
1734 #else
1735 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, save_doubles);
1736 #endif
1737 Call(code, RelocInfo::CODE_TARGET);
1738 }
1739
1740 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
1741 const Runtime::Function* function = Runtime::FunctionForId(fid);
1742 DCHECK_EQ(1, function->result_size);
1743 if (function->nargs >= 0) {
1744 mov(r3, Operand(function->nargs));
1745 }
1746 JumpToExternalReference(ExternalReference::Create(fid));
1747 }
1748
1749 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
1750 bool builtin_exit_frame) {
1751 Move(r4, builtin);
1752 Handle<Code> code = CodeFactory::CEntry(isolate(), 1, kDontSaveFPRegs,
1753 kArgvOnStack, builtin_exit_frame);
1754 Jump(code, RelocInfo::CODE_TARGET);
1755 }
1756
1757 void MacroAssembler::JumpToInstructionStream(Address entry) {
1758 mov(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
1759 Jump(kOffHeapTrampolineRegister);
1760 }
1761
1762 void MacroAssembler::LoadWeakValue(Register out, Register in,
1763 Label* target_if_cleared) {
1764 cmpwi(in, Operand(kClearedWeakHeapObjectLower32));
1765 beq(target_if_cleared);
1766
1767 mov(r0, Operand(~kWeakHeapObjectMask));
1768 and_(out, in, r0);
1769 }
1770
1771 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
1772 Register scratch1, Register scratch2) {
1773 DCHECK_GT(value, 0);
1774 if (FLAG_native_code_counters && counter->Enabled()) {
1775 // This operation has to be exactly 32-bit wide in case the external
1776 // reference table redirects the counter to a uint32_t dummy_stats_counter_
1777 // field.
1778 Move(scratch2, ExternalReference::Create(counter));
1779 lwz(scratch1, MemOperand(scratch2));
1780 addi(scratch1, scratch1, Operand(value));
1781 stw(scratch1, MemOperand(scratch2));
1782 }
1783 }
1784
1785 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
1786 Register scratch1, Register scratch2) {
1787 DCHECK_GT(value, 0);
1788 if (FLAG_native_code_counters && counter->Enabled()) {
1789 // This operation has to be exactly 32-bit wide in case the external
1790 // reference table redirects the counter to a uint32_t dummy_stats_counter_
1791 // field.
1792 Move(scratch2, ExternalReference::Create(counter));
1793 lwz(scratch1, MemOperand(scratch2));
1794 subi(scratch1, scratch1, Operand(value));
1795 stw(scratch1, MemOperand(scratch2));
1796 }
1797 }
1798
1799 void TurboAssembler::Assert(Condition cond, AbortReason reason, CRegister cr) {
1800 if (emit_debug_code()) Check(cond, reason, cr);
1801 }
1802
1803 void TurboAssembler::Check(Condition cond, AbortReason reason, CRegister cr) {
1804 Label L;
1805 b(cond, &L, cr);
1806 Abort(reason);
1807 // will not return here
1808 bind(&L);
1809 }
1810
1811 void TurboAssembler::Abort(AbortReason reason) {
1812 Label abort_start;
1813 bind(&abort_start);
1814 #ifdef DEBUG
1815 const char* msg = GetAbortReason(reason);
1816 RecordComment("Abort message: ");
1817 RecordComment(msg);
1818 #endif
1819
1820 // Avoid emitting call to builtin if requested.
1821 if (trap_on_abort()) {
1822 stop();
1823 return;
1824 }
1825
1826 if (should_abort_hard()) {
1827 // We don't care if we constructed a frame. Just pretend we did.
1828 FrameScope assume_frame(this, StackFrame::NONE);
1829 mov(r3, Operand(static_cast<int>(reason)));
1830 PrepareCallCFunction(1, r4);
1831 CallCFunction(ExternalReference::abort_with_reason(), 1);
1832 return;
1833 }
1834
1835 LoadSmiLiteral(r4, Smi::FromInt(static_cast<int>(reason)));
1836
1837 // Disable stub call restrictions to always allow calls to abort.
1838 if (!has_frame_) {
1839 // We don't actually want to generate a pile of code for this, so just
1840 // claim there is a stack frame, without generating one.
1841 FrameScope scope(this, StackFrame::NONE);
1842 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1843 } else {
1844 Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
1845 }
1846 // will not return here
1847 }
1848
1849 void MacroAssembler::LoadMap(Register destination, Register object) {
1850 LoadTaggedPointerField(destination,
1851 FieldMemOperand(object, HeapObject::kMapOffset));
1852 }
1853
1854 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) {
1855 LoadMap(dst, cp);
1856 LoadTaggedPointerField(
1857 dst, FieldMemOperand(
1858 dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
1859 LoadTaggedPointerField(dst, MemOperand(dst, Context::SlotOffset(index)));
1860 }
1861
1862 void MacroAssembler::AssertNotSmi(Register object) {
1863 if (emit_debug_code()) {
1864 STATIC_ASSERT(kSmiTag == 0);
1865 TestIfSmi(object, r0);
1866 Check(ne, AbortReason::kOperandIsASmi, cr0);
1867 }
1868 }
1869
1870 void MacroAssembler::AssertSmi(Register object) {
1871 if (emit_debug_code()) {
1872 STATIC_ASSERT(kSmiTag == 0);
1873 TestIfSmi(object, r0);
1874 Check(eq, AbortReason::kOperandIsNotASmi, cr0);
1875 }
1876 }
1877
1878 void MacroAssembler::AssertConstructor(Register object) {
1879 if (emit_debug_code()) {
1880 STATIC_ASSERT(kSmiTag == 0);
1881 TestIfSmi(object, r0);
1882 Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, cr0);
1883 push(object);
1884 LoadMap(object, object);
1885 lbz(object, FieldMemOperand(object, Map::kBitFieldOffset));
1886 andi(object, object, Operand(Map::Bits1::IsConstructorBit::kMask));
1887 pop(object);
1888 Check(ne, AbortReason::kOperandIsNotAConstructor, cr0);
1889 }
1890 }
1891
1892 void MacroAssembler::AssertFunction(Register object) {
1893 if (emit_debug_code()) {
1894 STATIC_ASSERT(kSmiTag == 0);
1895 TestIfSmi(object, r0);
1896 Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, cr0);
1897 push(object);
1898 CompareObjectType(object, object, object, JS_FUNCTION_TYPE);
1899 pop(object);
1900 Check(eq, AbortReason::kOperandIsNotAFunction);
1901 }
1902 }
1903
1904 void MacroAssembler::AssertBoundFunction(Register object) {
1905 if (emit_debug_code()) {
1906 STATIC_ASSERT(kSmiTag == 0);
1907 TestIfSmi(object, r0);
1908 Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, cr0);
1909 push(object);
1910 CompareObjectType(object, object, object, JS_BOUND_FUNCTION_TYPE);
1911 pop(object);
1912 Check(eq, AbortReason::kOperandIsNotABoundFunction);
1913 }
1914 }
1915
1916 void MacroAssembler::AssertGeneratorObject(Register object) {
1917 if (!emit_debug_code()) return;
1918 TestIfSmi(object, r0);
1919 Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, cr0);
1920
1921 // Load map
1922 Register map = object;
1923 push(object);
1924 LoadMap(map, object);
1925
1926 // Check if JSGeneratorObject
1927 Label do_check;
1928 Register instance_type = object;
1929 CompareInstanceType(map, instance_type, JS_GENERATOR_OBJECT_TYPE);
1930 beq(&do_check);
1931
1932 // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
1933 cmpi(instance_type, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
1934 beq(&do_check);
1935
1936 // Check if JSAsyncGeneratorObject (See MacroAssembler::CompareInstanceType)
1937 cmpi(instance_type, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
1938
1939 bind(&do_check);
1940 // Restore generator object to register and perform assertion
1941 pop(object);
1942 Check(eq, AbortReason::kOperandIsNotAGeneratorObject);
1943 }
1944
1945 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
1946 Register scratch) {
1947 if (emit_debug_code()) {
1948 Label done_checking;
1949 AssertNotSmi(object);
1950 CompareRoot(object, RootIndex::kUndefinedValue);
1951 beq(&done_checking);
1952 LoadMap(scratch, object);
1953 CompareInstanceType(scratch, scratch, ALLOCATION_SITE_TYPE);
1954 Assert(eq, AbortReason::kExpectedUndefinedOrCell);
1955 bind(&done_checking);
1956 }
1957 }
1958
1959 static const int kRegisterPassedArguments = 8;
1960
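// Computes how many stack words a C call needs beyond what fits in registers:
// each double argument that does not fit in a floating-point register takes
// two words, and each integer argument beyond the eight register-passed ones
// (r3..r10) takes one word.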
1961 int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
1962 int num_double_arguments) {
1963 int stack_passed_words = 0;
1964 if (num_double_arguments > DoubleRegister::kNumRegisters) {
1965 stack_passed_words +=
1966 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
1967 }
1968 // Up to 8 simple arguments are passed in registers r3..r10.
1969 if (num_reg_arguments > kRegisterPassedArguments) {
1970 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
1971 }
1972 return stack_passed_words;
1973 }
1974
1975 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
1976 int num_double_arguments,
1977 Register scratch) {
1978 int frame_alignment = ActivationFrameAlignment();
1979 int stack_passed_arguments =
1980 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
1981 int stack_space = kNumRequiredStackFrameSlots;
1982
1983 if (frame_alignment > kSystemPointerSize) {
1984 // Make stack end at alignment and make room for stack arguments
1985 // -- preserving original value of sp.
1986 mr(scratch, sp);
1987 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kSystemPointerSize));
1988 DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
1989 ClearRightImm(sp, sp,
1990 Operand(base::bits::WhichPowerOfTwo(frame_alignment)));
1991 StoreP(scratch,
1992 MemOperand(sp, stack_passed_arguments * kSystemPointerSize));
1993 } else {
1994 // Make room for stack arguments
1995 stack_space += stack_passed_arguments;
1996 }
1997
1998 // Allocate frame with required slots to make ABI work.
1999 li(r0, Operand::Zero());
2000 StorePU(r0, MemOperand(sp, -stack_space * kSystemPointerSize));
2001 }
2002
2003 void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
2004 Register scratch) {
2005 PrepareCallCFunction(num_reg_arguments, 0, scratch);
2006 }
2007
2008 void TurboAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
2009
2010 void TurboAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
2011
2012 void TurboAssembler::MovToFloatParameters(DoubleRegister src1,
2013 DoubleRegister src2) {
2014 if (src2 == d1) {
2015 DCHECK(src1 != d2);
2016 Move(d2, src2);
2017 Move(d1, src1);
2018 } else {
2019 Move(d1, src1);
2020 Move(d2, src2);
2021 }
2022 }
2023
2024 void TurboAssembler::CallCFunction(ExternalReference function,
2025 int num_reg_arguments,
2026 int num_double_arguments,
2027 bool has_function_descriptor) {
2028 Move(ip, function);
2029 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments,
2030 has_function_descriptor);
2031 }
2032
2033 void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
2034 int num_double_arguments,
2035 bool has_function_descriptor) {
2036 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments,
2037 has_function_descriptor);
2038 }
2039
2040 void TurboAssembler::CallCFunction(ExternalReference function,
2041 int num_arguments,
2042 bool has_function_descriptor) {
2043 CallCFunction(function, num_arguments, 0, has_function_descriptor);
2044 }
2045
2046 void TurboAssembler::CallCFunction(Register function, int num_arguments,
2047 bool has_function_descriptor) {
2048 CallCFunction(function, num_arguments, 0, has_function_descriptor);
2049 }
2050
2051 void TurboAssembler::CallCFunctionHelper(Register function,
2052 int num_reg_arguments,
2053 int num_double_arguments,
2054 bool has_function_descriptor) {
2055 DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
2056 DCHECK(has_frame());
2057
2058 // Save the frame pointer and PC so that the stack layout remains iterable,
2059 // even without an ExitFrame which normally exists between JS and C frames.
2060 Register addr_scratch = r7;
2061 Register scratch = r8;
2062 Push(scratch);
2063 mflr(scratch);
2064 // See x64 code for reasoning about how to address the isolate data fields.
2065 if (root_array_available()) {
2066 LoadPC(r0);
2067 StoreP(r0, MemOperand(kRootRegister,
2068 IsolateData::fast_c_call_caller_pc_offset()));
2069 StoreP(fp, MemOperand(kRootRegister,
2070 IsolateData::fast_c_call_caller_fp_offset()));
2071 } else {
2072 DCHECK_NOT_NULL(isolate());
2073 Push(addr_scratch);
2074
2075 Move(addr_scratch,
2076 ExternalReference::fast_c_call_caller_pc_address(isolate()));
2077 LoadPC(r0);
2078 StoreP(r0, MemOperand(addr_scratch));
2079 Move(addr_scratch,
2080 ExternalReference::fast_c_call_caller_fp_address(isolate()));
2081 StoreP(fp, MemOperand(addr_scratch));
2082 Pop(addr_scratch);
2083 }
2084 mtlr(scratch);
2085 Pop(scratch);
2086
2087 // Just call directly. The function called cannot cause a GC, or
2088 // allow preemption, so the return address in the link register
2089 // stays correct.
2090 Register dest = function;
2091 if (ABI_USES_FUNCTION_DESCRIPTORS && has_function_descriptor) {
2092 // AIX/PPC64BE Linux uses a function descriptor. When calling C code be
2093 // aware of this descriptor and pick up values from it
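// The descriptor stores the real entry point at offset 0 and the callee's
// TOC pointer at offset kSystemPointerSize; load the TOC into the ABI TOC
// register and branch through the entry point.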
2094 LoadP(ToRegister(ABI_TOC_REGISTER),
2095 MemOperand(function, kSystemPointerSize));
2096 LoadP(ip, MemOperand(function, 0));
2097 dest = ip;
2098 } else if (ABI_CALL_VIA_IP) {
2099 // Linux on PPC and the simulator, not AIX.
2100 Move(ip, function);
2101 dest = ip;
2102 }
2103
2104 Call(dest);
2105
2106 // We don't unset the PC; the FP is the source of truth.
2107 Register zero_scratch = r0;
2108 mov(zero_scratch, Operand::Zero());
2109
2110 if (root_array_available()) {
2111 StoreP(
2112 zero_scratch,
2113 MemOperand(kRootRegister, IsolateData::fast_c_call_caller_fp_offset()));
2114 } else {
2115 DCHECK_NOT_NULL(isolate());
2116 Push(addr_scratch);
2117 Move(addr_scratch,
2118 ExternalReference::fast_c_call_caller_fp_address(isolate()));
2119 StoreP(zero_scratch, MemOperand(addr_scratch));
2120 Pop(addr_scratch);
2121 }
2122
2123 // Remove the frame set up in PrepareCallCFunction.
2124 int stack_passed_arguments =
2125 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
2126 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
2127 if (ActivationFrameAlignment() > kSystemPointerSize) {
2128 LoadP(sp, MemOperand(sp, stack_space * kSystemPointerSize));
2129 } else {
2130 addi(sp, sp, Operand(stack_space * kSystemPointerSize));
2131 }
2132 }
2133
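// Tests flag bits in the header of the page containing |object|. Clearing the
// low kPageSizeBits bits of the object address yields the page start, where
// the BasicMemoryChunk flags word lives.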
2134 void TurboAssembler::CheckPageFlag(
2135 Register object,
2136 Register scratch, // scratch may be same register as object
2137 int mask, Condition cc, Label* condition_met) {
2138 DCHECK(cc == ne || cc == eq);
2139 ClearRightImm(scratch, object, Operand(kPageSizeBits));
2140 LoadP(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
2141
2142 mov(r0, Operand(mask));
2143 and_(r0, scratch, r0, SetRC);
2144
2145 if (cc == ne) {
2146 bne(condition_met, cr0);
2147 }
2148 if (cc == eq) {
2149 beq(condition_met, cr0);
2150 }
2151 }
2152
2153 void TurboAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
2154
2155 void TurboAssembler::ResetRoundingMode() {
2156 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
2157 }
2158
2159 ////////////////////////////////////////////////////////////////////////////////
2160 //
2161 // New MacroAssembler Interfaces added for PPC
2162 //
2163 ////////////////////////////////////////////////////////////////////////////////
2164 void TurboAssembler::LoadIntLiteral(Register dst, int value) {
2165 mov(dst, Operand(value));
2166 }
2167
2168 void TurboAssembler::LoadSmiLiteral(Register dst, Smi smi) {
2169 mov(dst, Operand(smi));
2170 }
2171
2172 void TurboAssembler::LoadDoubleLiteral(DoubleRegister result, Double value,
2173 Register scratch) {
2174 if (FLAG_enable_embedded_constant_pool && is_constant_pool_available() &&
2175 !(scratch == r0 && ConstantPoolAccessIsInOverflow())) {
2176 ConstantPoolEntry::Access access = ConstantPoolAddEntry(value);
2177 if (access == ConstantPoolEntry::OVERFLOWED) {
2178 addis(scratch, kConstantPoolRegister, Operand::Zero());
2179 lfd(result, MemOperand(scratch, 0));
2180 } else {
2181 lfd(result, MemOperand(kConstantPoolRegister, 0));
2182 }
2183 return;
2184 }
2185
2186 // avoid gcc strict aliasing error using union cast
2187 union {
2188 uint64_t dval;
2189 #if V8_TARGET_ARCH_PPC64
2190 intptr_t ival;
2191 #else
2192 intptr_t ival[2];
2193 #endif
2194 } litVal;
2195
2196 litVal.dval = value.AsUint64();
2197
2198 #if V8_TARGET_ARCH_PPC64
2199 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2200 mov(scratch, Operand(litVal.ival));
2201 mtfprd(result, scratch);
2202 return;
2203 }
2204 #endif
2205
2206 addi(sp, sp, Operand(-kDoubleSize));
2207 #if V8_TARGET_ARCH_PPC64
2208 mov(scratch, Operand(litVal.ival));
2209 std(scratch, MemOperand(sp));
2210 #else
2211 LoadIntLiteral(scratch, litVal.ival[0]);
2212 stw(scratch, MemOperand(sp, 0));
2213 LoadIntLiteral(scratch, litVal.ival[1]);
2214 stw(scratch, MemOperand(sp, 4));
2215 #endif
2216 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2217 lfd(result, MemOperand(sp, 0));
2218 addi(sp, sp, Operand(kDoubleSize));
2219 }
2220
2221 void TurboAssembler::MovIntToDouble(DoubleRegister dst, Register src,
2222 Register scratch) {
2223 // sign-extend src to 64-bit
2224 #if V8_TARGET_ARCH_PPC64
2225 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2226 mtfprwa(dst, src);
2227 return;
2228 }
2229 #endif
2230
2231 DCHECK(src != scratch);
2232 subi(sp, sp, Operand(kDoubleSize));
2233 #if V8_TARGET_ARCH_PPC64
2234 extsw(scratch, src);
2235 std(scratch, MemOperand(sp, 0));
2236 #else
2237 srawi(scratch, src, 31);
2238 stw(scratch, MemOperand(sp, Register::kExponentOffset));
2239 stw(src, MemOperand(sp, Register::kMantissaOffset));
2240 #endif
2241 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2242 lfd(dst, MemOperand(sp, 0));
2243 addi(sp, sp, Operand(kDoubleSize));
2244 }
2245
2246 void TurboAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
2247 Register scratch) {
2248 // zero-extend src to 64-bit
2249 #if V8_TARGET_ARCH_PPC64
2250 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2251 mtfprwz(dst, src);
2252 return;
2253 }
2254 #endif
2255
2256 DCHECK(src != scratch);
2257 subi(sp, sp, Operand(kDoubleSize));
2258 #if V8_TARGET_ARCH_PPC64
2259 clrldi(scratch, src, Operand(32));
2260 std(scratch, MemOperand(sp, 0));
2261 #else
2262 li(scratch, Operand::Zero());
2263 stw(scratch, MemOperand(sp, Register::kExponentOffset));
2264 stw(src, MemOperand(sp, Register::kMantissaOffset));
2265 #endif
2266 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2267 lfd(dst, MemOperand(sp, 0));
2268 addi(sp, sp, Operand(kDoubleSize));
2269 }
2270
2271 void TurboAssembler::MovInt64ToDouble(DoubleRegister dst,
2272 #if !V8_TARGET_ARCH_PPC64
2273 Register src_hi,
2274 #endif
2275 Register src) {
2276 #if V8_TARGET_ARCH_PPC64
2277 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2278 mtfprd(dst, src);
2279 return;
2280 }
2281 #endif
2282
2283 subi(sp, sp, Operand(kDoubleSize));
2284 #if V8_TARGET_ARCH_PPC64
2285 std(src, MemOperand(sp, 0));
2286 #else
2287 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2288 stw(src, MemOperand(sp, Register::kMantissaOffset));
2289 #endif
2290 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2291 lfd(dst, MemOperand(sp, 0));
2292 addi(sp, sp, Operand(kDoubleSize));
2293 }
2294
2295 #if V8_TARGET_ARCH_PPC64
2296 void TurboAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
2297 Register src_hi,
2298 Register src_lo,
2299 Register scratch) {
2300 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2301 sldi(scratch, src_hi, Operand(32));
2302 rldimi(scratch, src_lo, 0, 32);
2303 mtfprd(dst, scratch);
2304 return;
2305 }
2306
2307 subi(sp, sp, Operand(kDoubleSize));
2308 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
2309 stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
2310 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2311 lfd(dst, MemOperand(sp));
2312 addi(sp, sp, Operand(kDoubleSize));
2313 }
2314 #endif
2315
2316 void TurboAssembler::InsertDoubleLow(DoubleRegister dst, Register src,
2317 Register scratch) {
2318 #if V8_TARGET_ARCH_PPC64
2319 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2320 mffprd(scratch, dst);
2321 rldimi(scratch, src, 0, 32);
2322 mtfprd(dst, scratch);
2323 return;
2324 }
2325 #endif
2326
2327 subi(sp, sp, Operand(kDoubleSize));
2328 stfd(dst, MemOperand(sp));
2329 stw(src, MemOperand(sp, Register::kMantissaOffset));
2330 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2331 lfd(dst, MemOperand(sp));
2332 addi(sp, sp, Operand(kDoubleSize));
2333 }
2334
2335 void TurboAssembler::InsertDoubleHigh(DoubleRegister dst, Register src,
2336 Register scratch) {
2337 #if V8_TARGET_ARCH_PPC64
2338 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2339 mffprd(scratch, dst);
2340 rldimi(scratch, src, 32, 0);
2341 mtfprd(dst, scratch);
2342 return;
2343 }
2344 #endif
2345
2346 subi(sp, sp, Operand(kDoubleSize));
2347 stfd(dst, MemOperand(sp));
2348 stw(src, MemOperand(sp, Register::kExponentOffset));
2349 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2350 lfd(dst, MemOperand(sp));
2351 addi(sp, sp, Operand(kDoubleSize));
2352 }
2353
2354 void TurboAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
2355 #if V8_TARGET_ARCH_PPC64
2356 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2357 mffprwz(dst, src);
2358 return;
2359 }
2360 #endif
2361
2362 subi(sp, sp, Operand(kDoubleSize));
2363 stfd(src, MemOperand(sp));
2364 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2365 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2366 addi(sp, sp, Operand(kDoubleSize));
2367 }
2368
2369 void TurboAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
2370 #if V8_TARGET_ARCH_PPC64
2371 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2372 mffprd(dst, src);
2373 srdi(dst, dst, Operand(32));
2374 return;
2375 }
2376 #endif
2377
2378 subi(sp, sp, Operand(kDoubleSize));
2379 stfd(src, MemOperand(sp));
2380 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2381 lwz(dst, MemOperand(sp, Register::kExponentOffset));
2382 addi(sp, sp, Operand(kDoubleSize));
2383 }
2384
2385 void TurboAssembler::MovDoubleToInt64(
2386 #if !V8_TARGET_ARCH_PPC64
2387 Register dst_hi,
2388 #endif
2389 Register dst, DoubleRegister src) {
2390 #if V8_TARGET_ARCH_PPC64
2391 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
2392 mffprd(dst, src);
2393 return;
2394 }
2395 #endif
2396
2397 subi(sp, sp, Operand(kDoubleSize));
2398 stfd(src, MemOperand(sp));
2399 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2400 #if V8_TARGET_ARCH_PPC64
2401 ld(dst, MemOperand(sp, 0));
2402 #else
2403 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
2404 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
2405 #endif
2406 addi(sp, sp, Operand(kDoubleSize));
2407 }
2408
2409 void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
2410 subi(sp, sp, Operand(kFloatSize));
2411 stw(src, MemOperand(sp, 0));
2412 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2413 lfs(dst, MemOperand(sp, 0));
2414 addi(sp, sp, Operand(kFloatSize));
2415 }
2416
2417 void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
2418 subi(sp, sp, Operand(kFloatSize));
2419 stfs(src, MemOperand(sp, 0));
2420 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
2421 lwz(dst, MemOperand(sp, 0));
2422 addi(sp, sp, Operand(kFloatSize));
2423 }
2424
2425 void TurboAssembler::Add(Register dst, Register src, intptr_t value,
2426 Register scratch) {
2427 if (is_int16(value)) {
2428 addi(dst, src, Operand(value));
2429 } else {
2430 mov(scratch, Operand(value));
2431 add(dst, src, scratch);
2432 }
2433 }
2434
2435 void TurboAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
2436 CRegister cr) {
2437 intptr_t value = src2.immediate();
2438 if (is_int16(value)) {
2439 cmpi(src1, src2, cr);
2440 } else {
2441 mov(scratch, src2);
2442 cmp(src1, scratch, cr);
2443 }
2444 }
2445
2446 void TurboAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
2447 CRegister cr) {
2448 intptr_t value = src2.immediate();
2449 if (is_uint16(value)) {
2450 cmpli(src1, src2, cr);
2451 } else {
2452 mov(scratch, src2);
2453 cmpl(src1, scratch, cr);
2454 }
2455 }
2456
2457 void TurboAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
2458 CRegister cr) {
2459 intptr_t value = src2.immediate();
2460 if (is_int16(value)) {
2461 cmpwi(src1, src2, cr);
2462 } else {
2463 mov(scratch, src2);
2464 cmpw(src1, scratch, cr);
2465 }
2466 }
2467
2468 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
2469 Register scratch, CRegister cr) {
2470 intptr_t value = src2.immediate();
2471 if (is_uint16(value)) {
2472 cmplwi(src1, src2, cr);
2473 } else {
2474 mov(scratch, src2);
2475 cmplw(src1, scratch, cr);
2476 }
2477 }
2478
2479 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
2480 RCBit rc) {
2481 if (rb.is_reg()) {
2482 and_(ra, rs, rb.rm(), rc);
2483 } else {
2484 if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2485 rc == SetRC) {
2486 andi(ra, rs, rb);
2487 } else {
2488 // mov handles the relocation.
2489 DCHECK(rs != r0);
2490 mov(r0, rb);
2491 and_(ra, rs, r0, rc);
2492 }
2493 }
2494 }
2495
2496 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
2497 if (rb.is_reg()) {
2498 orx(ra, rs, rb.rm(), rc);
2499 } else {
2500 if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2501 rc == LeaveRC) {
2502 ori(ra, rs, rb);
2503 } else {
2504 // mov handles the relocation.
2505 DCHECK(rs != r0);
2506 mov(r0, rb);
2507 orx(ra, rs, r0, rc);
2508 }
2509 }
2510 }
2511
2512 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
2513 RCBit rc) {
2514 if (rb.is_reg()) {
2515 xor_(ra, rs, rb.rm(), rc);
2516 } else {
2517 if (is_uint16(rb.immediate()) && RelocInfo::IsNone(rb.rmode_) &&
2518 rc == LeaveRC) {
2519 xori(ra, rs, rb);
2520 } else {
2521 // mov handles the relocation.
2522 DCHECK(rs != r0);
2523 mov(r0, rb);
2524 xor_(ra, rs, r0, rc);
2525 }
2526 }
2527 }
2528
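// With pointer compression (or 31-bit Smis on a 64-bit target) a Smi fits in
// 32 bits, so a 32-bit compare against the immediate suffices; otherwise the
// Smi is materialized into the scratch register first.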
2529 void MacroAssembler::CmpSmiLiteral(Register src1, Smi smi, Register scratch,
2530 CRegister cr) {
2531 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
2532 Cmpwi(src1, Operand(smi), scratch, cr);
2533 #else
2534 LoadSmiLiteral(scratch, smi);
2535 cmp(src1, scratch, cr);
2536 #endif
2537 }
2538
2539 void MacroAssembler::CmplSmiLiteral(Register src1, Smi smi, Register scratch,
2540 CRegister cr) {
2541 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
2542 Cmpli(src1, Operand(smi), scratch, cr);
2543 #else
2544 LoadSmiLiteral(scratch, smi);
2545 cmpl(src1, scratch, cr);
2546 #endif
2547 }
2548
2549 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi smi,
2550 Register scratch) {
2551 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
2552 Add(dst, src, static_cast<intptr_t>(smi.ptr()), scratch);
2553 #else
2554 LoadSmiLiteral(scratch, smi);
2555 add(dst, src, scratch);
2556 #endif
2557 }
2558
2559 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi smi,
2560 Register scratch) {
2561 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
2562 Add(dst, src, -(static_cast<intptr_t>(smi.ptr())), scratch);
2563 #else
2564 LoadSmiLiteral(scratch, smi);
2565 sub(dst, src, scratch);
2566 #endif
2567 }
2568
2569 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
2570 Register scratch, RCBit rc) {
2571 #if defined(V8_COMPRESS_POINTERS) || defined(V8_31BIT_SMIS_ON_64BIT_ARCH)
2572 And(dst, src, Operand(smi), rc);
2573 #else
2574 LoadSmiLiteral(scratch, smi);
2575 and_(dst, src, scratch, rc);
2576 #endif
2577 }
2578
2579 // Load a "pointer" sized value from the memory location
2580 void TurboAssembler::LoadP(Register dst, const MemOperand& mem,
2581 Register scratch) {
2582 DCHECK_EQ(mem.rb(), no_reg);
2583 int offset = mem.offset();
2584 int misaligned = (offset & 3);
2585 int adj = (offset & 3) - 4;
2586 int alignedOffset = (offset & ~3) + 4;
2587
2588 if (!is_int16(offset) || (misaligned && !is_int16(alignedOffset))) {
2589 /* cannot use d-form */
2590 mov(scratch, Operand(offset));
2591 LoadPX(dst, MemOperand(mem.ra(), scratch));
2592 } else {
2593 if (misaligned) {
2594 // adjust base to conform to offset alignment requirements
2595 // Todo: enhance to use scratch if dst is unsuitable
2596 DCHECK_NE(dst, r0);
2597 addi(dst, mem.ra(), Operand(adj));
2598 ld(dst, MemOperand(dst, alignedOffset));
2599 } else {
2600 ld(dst, mem);
2601 }
2602 }
2603 }
2604
2605 void TurboAssembler::LoadPU(Register dst, const MemOperand& mem,
2606 Register scratch) {
2607 int offset = mem.offset();
2608
2609 if (!is_int16(offset)) {
2610 /* cannot use d-form */
2611 DCHECK(scratch != no_reg);
2612 mov(scratch, Operand(offset));
2613 LoadPUX(dst, MemOperand(mem.ra(), scratch));
2614 } else {
2615 #if V8_TARGET_ARCH_PPC64
2616 ldu(dst, mem);
2617 #else
2618 lwzu(dst, mem);
2619 #endif
2620 }
2621 }
2622
2623 // Store a "pointer" sized value to the memory location
2624 void TurboAssembler::StoreP(Register src, const MemOperand& mem,
2625 Register scratch) {
2626 int offset = mem.offset();
2627
2628 if (!is_int16(offset)) {
2629 /* cannot use d-form */
2630 DCHECK(scratch != no_reg);
2631 mov(scratch, Operand(offset));
2632 StorePX(src, MemOperand(mem.ra(), scratch));
2633 } else {
2634 #if V8_TARGET_ARCH_PPC64
2635 int misaligned = (offset & 3);
2636 if (misaligned) {
2637 // adjust base to conform to offset alignment requirements
2638 // a suitable scratch is required here
2639 DCHECK(scratch != no_reg);
2640 if (scratch == r0) {
2641 LoadIntLiteral(scratch, offset);
2642 stdx(src, MemOperand(mem.ra(), scratch));
2643 } else {
2644 addi(scratch, mem.ra(), Operand((offset & 3) - 4));
2645 std(src, MemOperand(scratch, (offset & ~3) + 4));
2646 }
2647 } else {
2648 std(src, mem);
2649 }
2650 #else
2651 stw(src, mem);
2652 #endif
2653 }
2654 }
2655
2656 void TurboAssembler::StorePU(Register src, const MemOperand& mem,
2657 Register scratch) {
2658 int offset = mem.offset();
2659
2660 if (!is_int16(offset)) {
2661 /* cannot use d-form */
2662 DCHECK(scratch != no_reg);
2663 mov(scratch, Operand(offset));
2664 StorePUX(src, MemOperand(mem.ra(), scratch));
2665 } else {
2666 #if V8_TARGET_ARCH_PPC64
2667 stdu(src, mem);
2668 #else
2669 stwu(src, mem);
2670 #endif
2671 }
2672 }
2673
2674 void TurboAssembler::LoadWordArith(Register dst, const MemOperand& mem,
2675 Register scratch) {
2676 int offset = mem.offset();
2677
2678 if (!is_int16(offset)) {
2679 DCHECK(scratch != no_reg);
2680 mov(scratch, Operand(offset));
2681 lwax(dst, MemOperand(mem.ra(), scratch));
2682 } else {
2683 #if V8_TARGET_ARCH_PPC64
2684 int misaligned = (offset & 3);
2685 if (misaligned) {
2686 // adjust base to conform to offset alignment requirements
2687 // Todo: enhance to use scratch if dst is unsuitable
2688 DCHECK(dst != r0);
2689 addi(dst, mem.ra(), Operand((offset & 3) - 4));
2690 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
2691 } else {
2692 lwa(dst, mem);
2693 }
2694 #else
2695 lwz(dst, mem);
2696 #endif
2697 }
2698 }
2699
2700 // Variable length depending on whether offset fits into immediate field
2701 // MemOperand currently only supports d-form
2702 void TurboAssembler::LoadWord(Register dst, const MemOperand& mem,
2703 Register scratch) {
2704 Register base = mem.ra();
2705 int offset = mem.offset();
2706
2707 if (!is_int16(offset)) {
2708 LoadIntLiteral(scratch, offset);
2709 lwzx(dst, MemOperand(base, scratch));
2710 } else {
2711 lwz(dst, mem);
2712 }
2713 }
2714
2715 // Variable length depending on whether offset fits into immediate field
2716 // MemOperand currently only supports d-form
2717 void TurboAssembler::StoreWord(Register src, const MemOperand& mem,
2718 Register scratch) {
2719 Register base = mem.ra();
2720 int offset = mem.offset();
2721
2722 if (!is_int16(offset)) {
2723 LoadIntLiteral(scratch, offset);
2724 stwx(src, MemOperand(base, scratch));
2725 } else {
2726 stw(src, mem);
2727 }
2728 }
2729
2730 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
2731 Register scratch) {
2732 int offset = mem.offset();
2733
2734 if (!is_int16(offset)) {
2735 DCHECK(scratch != no_reg);
2736 mov(scratch, Operand(offset));
2737 lhax(dst, MemOperand(mem.ra(), scratch));
2738 } else {
2739 lha(dst, mem);
2740 }
2741 }
2742
2743 // Variable length depending on whether offset fits into immediate field
2744 // MemOperand currently only supports d-form
2745 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
2746 Register scratch) {
2747 Register base = mem.ra();
2748 int offset = mem.offset();
2749
2750 if (!is_int16(offset)) {
2751 DCHECK_NE(scratch, no_reg);
2752 LoadIntLiteral(scratch, offset);
2753 lhzx(dst, MemOperand(base, scratch));
2754 } else {
2755 lhz(dst, mem);
2756 }
2757 }
2758
2759 // Variable length depending on whether offset fits into immediate field
2760 // MemOperand currently only supports d-form
2761 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
2762 Register scratch) {
2763 Register base = mem.ra();
2764 int offset = mem.offset();
2765
2766 if (!is_int16(offset)) {
2767 LoadIntLiteral(scratch, offset);
2768 sthx(src, MemOperand(base, scratch));
2769 } else {
2770 sth(src, mem);
2771 }
2772 }
2773
2774 // Variable length depending on whether offset fits into immediate field
2775 // MemOperand currently only supports d-form
2776 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
2777 Register scratch) {
2778 Register base = mem.ra();
2779 int offset = mem.offset();
2780
2781 if (!is_int16(offset)) {
2782 LoadIntLiteral(scratch, offset);
2783 lbzx(dst, MemOperand(base, scratch));
2784 } else {
2785 lbz(dst, mem);
2786 }
2787 }
2788
2789 // Variable length depending on whether offset fits into immediate field
2790 // MemOperand currently only supports d-form
2791 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
2792 Register scratch) {
2793 Register base = mem.ra();
2794 int offset = mem.offset();
2795
2796 if (!is_int16(offset)) {
2797 LoadIntLiteral(scratch, offset);
2798 stbx(src, MemOperand(base, scratch));
2799 } else {
2800 stb(src, mem);
2801 }
2802 }
2803
2804 void TurboAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
2805 Register scratch) {
2806 Register base = mem.ra();
2807 int offset = mem.offset();
2808
2809 if (!is_int16(offset)) {
2810 mov(scratch, Operand(offset));
2811 lfdx(dst, MemOperand(base, scratch));
2812 } else {
2813 lfd(dst, mem);
2814 }
2815 }
2816
2817 void TurboAssembler::LoadFloat32(DoubleRegister dst, const MemOperand& mem,
2818 Register scratch) {
2819 Register base = mem.ra();
2820 int offset = mem.offset();
2821
2822 if (!is_int16(offset)) {
2823 mov(scratch, Operand(offset));
2824 lfsx(dst, MemOperand(base, scratch));
2825 } else {
2826 lfs(dst, mem);
2827 }
2828 }
2829
2830 void MacroAssembler::LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
2831 Register scratch) {
2832 Register base = mem.ra();
2833 int offset = mem.offset();
2834
2835 if (!is_int16(offset)) {
2836 mov(scratch, Operand(offset));
2837 lfdux(dst, MemOperand(base, scratch));
2838 } else {
2839 lfdu(dst, mem);
2840 }
2841 }
2842
2843 void TurboAssembler::LoadSingle(DoubleRegister dst, const MemOperand& mem,
2844 Register scratch) {
2845 Register base = mem.ra();
2846 int offset = mem.offset();
2847
2848 if (!is_int16(offset)) {
2849 mov(scratch, Operand(offset));
2850 lfsx(dst, MemOperand(base, scratch));
2851 } else {
2852 lfs(dst, mem);
2853 }
2854 }
2855
2856 void TurboAssembler::LoadSingleU(DoubleRegister dst, const MemOperand& mem,
2857 Register scratch) {
2858 Register base = mem.ra();
2859 int offset = mem.offset();
2860
2861 if (!is_int16(offset)) {
2862 mov(scratch, Operand(offset));
2863 lfsux(dst, MemOperand(base, scratch));
2864 } else {
2865 lfsu(dst, mem);
2866 }
2867 }
2868
2869 void TurboAssembler::LoadSimd128(Simd128Register dst, const MemOperand& mem,
2870 Register ScratchReg,
2871 Simd128Register ScratchDoubleReg) {
2872 // lvx needs the stack to be 16 byte aligned.
2873 // We first use lxvd/stxvd to copy the content to an aligned address. lxvd
2874 // itself reverses the lanes so it cannot be used as is.
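// Round-tripping through a 16-byte-aligned stack slot lets lvx, which
// requires such alignment, reload the value with the lane order the rest of
// the code expects.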
2875 lxvd(ScratchDoubleReg, mem);
2876 mr(ScratchReg, sp);
2877 ClearRightImm(
2878 sp, sp,
2879 Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
2880 addi(sp, sp, Operand(-16));
2881 stxvd(ScratchDoubleReg, MemOperand(r0, sp));
2882 // Load it with correct lane ordering.
2883 lvx(dst, MemOperand(r0, sp));
2884 mr(sp, ScratchReg);
2885 }
2886
2887 void TurboAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
2888 Register scratch) {
2889 Register base = mem.ra();
2890 int offset = mem.offset();
2891
2892 if (!is_int16(offset)) {
2893 mov(scratch, Operand(offset));
2894 stfdx(src, MemOperand(base, scratch));
2895 } else {
2896 stfd(src, mem);
2897 }
2898 }
2899
2900 void TurboAssembler::StoreDoubleU(DoubleRegister src, const MemOperand& mem,
2901 Register scratch) {
2902 Register base = mem.ra();
2903 int offset = mem.offset();
2904
2905 if (!is_int16(offset)) {
2906 mov(scratch, Operand(offset));
2907 stfdux(src, MemOperand(base, scratch));
2908 } else {
2909 stfdu(src, mem);
2910 }
2911 }
2912
2913 void TurboAssembler::StoreSingle(DoubleRegister src, const MemOperand& mem,
2914 Register scratch) {
2915 Register base = mem.ra();
2916 int offset = mem.offset();
2917
2918 if (!is_int16(offset)) {
2919 mov(scratch, Operand(offset));
2920 stfsx(src, MemOperand(base, scratch));
2921 } else {
2922 stfs(src, mem);
2923 }
2924 }
2925
2926 void TurboAssembler::StoreSingleU(DoubleRegister src, const MemOperand& mem,
2927 Register scratch) {
2928 Register base = mem.ra();
2929 int offset = mem.offset();
2930
2931 if (!is_int16(offset)) {
2932 mov(scratch, Operand(offset));
2933 stfsux(src, MemOperand(base, scratch));
2934 } else {
2935 stfsu(src, mem);
2936 }
2937 }
2938
2939 void TurboAssembler::StoreSimd128(Simd128Register src, const MemOperand& mem,
2940 Register ScratchReg,
2941 Simd128Register ScratchDoubleReg) {
2942 // stvx needs the stack to be 16 byte aligned.
2943 // We use lxvd/stxvd to store the content on an aligned address. stxvd
2944 // itself reverses the lanes so it cannot be used as is.
2945 mr(ScratchReg, sp);
2946 ClearRightImm(
2947 sp, sp,
2948 Operand(base::bits::WhichPowerOfTwo(16))); // equivalent to &= -16
2949 addi(sp, sp, Operand(-16));
2950 stvx(src, MemOperand(r0, sp));
2951 lxvd(ScratchDoubleReg, MemOperand(r0, sp));
2952 mr(sp, ScratchReg);
2953 stxvd(ScratchDoubleReg, mem);
2954 }
2955
2956 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
2957 Register reg4, Register reg5,
2958 Register reg6) {
2959 RegList regs = 0;
2960 if (reg1.is_valid()) regs |= reg1.bit();
2961 if (reg2.is_valid()) regs |= reg2.bit();
2962 if (reg3.is_valid()) regs |= reg3.bit();
2963 if (reg4.is_valid()) regs |= reg4.bit();
2964 if (reg5.is_valid()) regs |= reg5.bit();
2965 if (reg6.is_valid()) regs |= reg6.bit();
2966
2967 const RegisterConfiguration* config = RegisterConfiguration::Default();
2968 for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
2969 int code = config->GetAllocatableGeneralCode(i);
2970 Register candidate = Register::from_code(code);
2971 if (regs & candidate.bit()) continue;
2972 return candidate;
2973 }
2974 UNREACHABLE();
2975 }
2976
2977 void TurboAssembler::SwapP(Register src, Register dst, Register scratch) {
2978 if (src == dst) return;
2979 DCHECK(!AreAliased(src, dst, scratch));
2980 mr(scratch, src);
2981 mr(src, dst);
2982 mr(dst, scratch);
2983 }
2984
2985 void TurboAssembler::SwapP(Register src, MemOperand dst, Register scratch) {
2986 if (dst.ra() != r0 && dst.ra().is_valid())
2987 DCHECK(!AreAliased(src, dst.ra(), scratch));
2988 if (dst.rb() != r0 && dst.rb().is_valid())
2989 DCHECK(!AreAliased(src, dst.rb(), scratch));
2990 DCHECK(!AreAliased(src, scratch));
2991 mr(scratch, src);
2992 LoadP(src, dst, r0);
2993 StoreP(scratch, dst, r0);
2994 }
2995
2996 void TurboAssembler::SwapP(MemOperand src, MemOperand dst, Register scratch_0,
2997 Register scratch_1) {
2998 if (src.ra() != r0 && src.ra().is_valid())
2999 DCHECK(!AreAliased(src.ra(), scratch_0, scratch_1));
3000 if (src.rb() != r0 && src.rb().is_valid())
3001 DCHECK(!AreAliased(src.rb(), scratch_0, scratch_1));
3002 if (dst.ra() != r0 && dst.ra().is_valid())
3003 DCHECK(!AreAliased(dst.ra(), scratch_0, scratch_1));
3004 if (dst.rb() != r0 && dst.rb().is_valid())
3005 DCHECK(!AreAliased(dst.rb(), scratch_0, scratch_1));
3006 DCHECK(!AreAliased(scratch_0, scratch_1));
3007 if (is_int16(src.offset()) || is_int16(dst.offset())) {
3008 if (!is_int16(src.offset())) {
3009 // swap operand
3010 MemOperand temp = src;
3011 src = dst;
3012 dst = temp;
3013 }
3014 LoadP(scratch_1, dst, scratch_0);
3015 LoadP(scratch_0, src);
3016 StoreP(scratch_1, src);
3017 StoreP(scratch_0, dst, scratch_1);
3018 } else {
3019 LoadP(scratch_1, dst, scratch_0);
3020 push(scratch_1);
3021 LoadP(scratch_0, src, scratch_1);
3022 StoreP(scratch_0, dst, scratch_1);
3023 pop(scratch_1);
3024 StoreP(scratch_1, src, scratch_0);
3025 }
3026 }
3027
3028 void TurboAssembler::SwapFloat32(DoubleRegister src, DoubleRegister dst,
3029 DoubleRegister scratch) {
3030 if (src == dst) return;
3031 DCHECK(!AreAliased(src, dst, scratch));
3032 fmr(scratch, src);
3033 fmr(src, dst);
3034 fmr(dst, scratch);
3035 }
3036
3037 void TurboAssembler::SwapFloat32(DoubleRegister src, MemOperand dst,
3038 DoubleRegister scratch) {
3039 DCHECK(!AreAliased(src, scratch));
3040 fmr(scratch, src);
3041 LoadSingle(src, dst, r0);
3042 StoreSingle(scratch, dst, r0);
3043 }
3044
3045 void TurboAssembler::SwapFloat32(MemOperand src, MemOperand dst,
3046 DoubleRegister scratch_0,
3047 DoubleRegister scratch_1) {
3048 DCHECK(!AreAliased(scratch_0, scratch_1));
3049 LoadSingle(scratch_0, src, r0);
3050 LoadSingle(scratch_1, dst, r0);
3051 StoreSingle(scratch_0, dst, r0);
3052 StoreSingle(scratch_1, src, r0);
3053 }
3054
3055 void TurboAssembler::SwapDouble(DoubleRegister src, DoubleRegister dst,
3056 DoubleRegister scratch) {
3057 if (src == dst) return;
3058 DCHECK(!AreAliased(src, dst, scratch));
3059 fmr(scratch, src);
3060 fmr(src, dst);
3061 fmr(dst, scratch);
3062 }
3063
3064 void TurboAssembler::SwapDouble(DoubleRegister src, MemOperand dst,
3065 DoubleRegister scratch) {
3066 DCHECK(!AreAliased(src, scratch));
3067 fmr(scratch, src);
3068 LoadDouble(src, dst, r0);
3069 StoreDouble(scratch, dst, r0);
3070 }
3071
3072 void TurboAssembler::SwapDouble(MemOperand src, MemOperand dst,
3073 DoubleRegister scratch_0,
3074 DoubleRegister scratch_1) {
3075 DCHECK(!AreAliased(scratch_0, scratch_1));
3076 LoadDouble(scratch_0, src, r0);
3077 LoadDouble(scratch_1, dst, r0);
3078 StoreDouble(scratch_0, dst, r0);
3079 StoreDouble(scratch_1, src, r0);
3080 }
3081
3082 void TurboAssembler::SwapSimd128(Simd128Register src, Simd128Register dst,
3083 Simd128Register scratch) {
3084 if (src == dst) return;
3085 vor(scratch, src, src);
3086 vor(src, dst, dst);
3087 vor(dst, scratch, scratch);
3088 }
3089
3090 void TurboAssembler::SwapSimd128(Simd128Register src, MemOperand dst,
3091 Simd128Register scratch) {
3092 DCHECK(!AreAliased(src, scratch));
  // push d0, to be used as scratch
  addi(sp, sp, Operand(-kSimd128Size));
  StoreSimd128(d0, MemOperand(r0, sp), r0, scratch);
  mov(ip, Operand(dst.offset()));
  LoadSimd128(d0, MemOperand(dst.ra(), ip), r0, scratch);
  StoreSimd128(src, MemOperand(dst.ra(), ip), r0, scratch);
  vor(src, d0, d0);
  // restore d0
  LoadSimd128(d0, MemOperand(r0, sp), ip, scratch);
  addi(sp, sp, Operand(kSimd128Size));
}

void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
                                 Simd128Register scratch) {
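  // As above, d0 and d1 are borrowed as data temporaries (and preserved on
  // the stack), since the single scratch register is needed by
  // LoadSimd128 / StoreSimd128.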
  // push d0 and d1, to be used as scratch
  addi(sp, sp, Operand(2 * -kSimd128Size));
  StoreSimd128(d0, MemOperand(r0, sp), ip, scratch);
  li(ip, Operand(kSimd128Size));
  StoreSimd128(d1, MemOperand(ip, sp), r0, scratch);

  mov(ip, Operand(src.offset()));
  LoadSimd128(d0, MemOperand(src.ra(), ip), r0, scratch);
  mov(ip, Operand(dst.offset()));
  LoadSimd128(d1, MemOperand(dst.ra(), ip), r0, scratch);

  StoreSimd128(d0, MemOperand(dst.ra(), ip), r0, scratch);
  mov(ip, Operand(src.offset()));
  StoreSimd128(d1, MemOperand(src.ra(), ip), r0, scratch);

  // restore d0 and d1
  LoadSimd128(d0, MemOperand(r0, sp), ip, scratch);
  li(ip, Operand(kSimd128Size));
  LoadSimd128(d1, MemOperand(ip, sp), r0, scratch);
  addi(sp, sp, Operand(2 * kSimd128Size));
}

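// Setting the poison register to all ones makes the subsequent AND-based
// masking of loaded values a no-op.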
void TurboAssembler::ResetSpeculationPoisonRegister() {
  mov(kSpeculationPoisonRegister, Operand(-1));
}

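// Cmpi compares against a 32-bit immediate; the extra register (r0 here) is
// a scratch used to materialize immediates that do not fit into the 16-bit
// field of the cmpi instruction.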
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
  Cmpi(x, Operand(y), r0);
  beq(dest);
}

void TurboAssembler::JumpIfLessThan(Register x, int32_t y, Label* dest) {
  Cmpi(x, Operand(y), r0);
  blt(dest);
}

void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
  STATIC_ASSERT(kSystemPointerSize == 8);
  STATIC_ASSERT(kSmiTagSize == 1);
  STATIC_ASSERT(kSmiTag == 0);

  // The builtin_index register contains the builtin index as a Smi.
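  // Convert the Smi into a byte offset into the builtin entry table: with
  // 32-bit Smis the payload sits in the upper 32 bits, so shifting right by
  // (kSmiShift - kSystemPointerSizeLog2) yields index * kSystemPointerSize;
  // with 31-bit Smis a left shift does the same.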
  if (SmiValuesAre32Bits()) {
    ShiftRightArithImm(builtin_index, builtin_index,
                       kSmiShift - kSystemPointerSizeLog2);
  } else {
    DCHECK(SmiValuesAre31Bits());
    ShiftLeftImm(builtin_index, builtin_index,
                 Operand(kSystemPointerSizeLog2 - kSmiShift));
  }
  addi(builtin_index, builtin_index,
       Operand(IsolateData::builtin_entry_table_offset()));
  LoadPX(builtin_index, MemOperand(kRootRegister, builtin_index));
}

void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
  LoadEntryFromBuiltinIndex(builtin_index);
  Call(builtin_index);
}

void TurboAssembler::LoadCodeObjectEntry(Register destination,
                                         Register code_object) {
  // Code objects are called differently depending on whether we are generating
  // builtin code (which will later be embedded into the binary) or compiling
  // user JS code at runtime.
  // * Builtin code runs in --jitless mode and thus must not call into on-heap
  //   Code targets. Instead, we dispatch through the builtins entry table.
  // * Codegen at runtime does not have this restriction and we can use the
  //   shorter, branchless instruction sequence. The assumption here is that
  //   targets are usually generated code and not builtin Code objects.

  if (options().isolate_independent_code) {
    DCHECK(root_array_available());
    Label if_code_is_off_heap, out;

    Register scratch = r11;

    DCHECK(!AreAliased(destination, scratch));
    DCHECK(!AreAliased(code_object, scratch));

    // Check whether the Code object is an off-heap trampoline. If so, call its
    // (off-heap) entry point directly without going through the (on-heap)
    // trampoline. Otherwise, just call the Code object as always.
    LoadWordArith(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
    mov(r0, Operand(Code::IsOffHeapTrampoline::kMask));
    and_(r0, scratch, r0, SetRC);
    bne(&if_code_is_off_heap, cr0);

    // Not an off-heap trampoline, the entry point is at
    // Code::raw_instruction_start().
    addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
    b(&out);

    // An off-heap trampoline, the entry point is loaded from the builtin entry
    // table.
    bind(&if_code_is_off_heap);
    LoadWordArith(scratch,
                  FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
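    // destination = builtin_entry_table[builtin_index], i.e.
    // kRootRegister + builtin_entry_table_offset + index * kSystemPointerSize.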
    ShiftLeftImm(destination, scratch, Operand(kSystemPointerSizeLog2));
    add(destination, destination, kRootRegister);
    LoadP(destination,
          MemOperand(destination, IsolateData::builtin_entry_table_offset()),
          r0);

    bind(&out);
  } else {
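    // On-heap case: the entry point is directly past the Code object header;
    // subtracting kHeapObjectTag compensates for the tagged pointer.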
    addi(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
  }
}

void TurboAssembler::CallCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  Call(code_object);
}

void TurboAssembler::JumpCodeObject(Register code_object) {
  LoadCodeObjectEntry(code_object, code_object);
  Jump(code_object);
}

void TurboAssembler::StoreReturnAddressAndCall(Register target) {
  // This generates the final instruction sequence for calls to C functions
  // once an exit frame has been constructed.
  //
  // Note that this assumes the caller code (i.e. the Code object currently
  // being generated) is immovable or that the callee function cannot trigger
  // GC, since the callee function will return to it.

  static constexpr int after_call_offset = 5 * kInstrSize;
  Label start_call;
  Register dest = target;

  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    // AIX/PPC64BE Linux uses a function descriptor. When calling C code, load
    // the real entry point and the TOC pointer from the descriptor.
    LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kSystemPointerSize));
    LoadP(ip, MemOperand(target, 0));
    dest = ip;
  } else if (ABI_CALL_VIA_IP && dest != ip) {
    Move(ip, target);
    dest = ip;
  }

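  // r7 ends up holding the address of the instruction just past the call
  // sequence below (the LoadPC result plus after_call_offset); it is stored
  // in the frame so the return address is available while the C code runs.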
  LoadPC(r7);
  bind(&start_call);
  addi(r7, r7, Operand(after_call_offset));
  StoreP(r7, MemOperand(sp, kStackFrameExtraParamSlot * kSystemPointerSize));
  Call(dest);

  DCHECK_EQ(after_call_offset - kInstrSize,
            SizeOfCodeGeneratedSince(&start_call));
}

void TurboAssembler::CallForDeoptimization(Builtins::Name target, int,
                                           Label* exit, DeoptimizeKind kind,
                                           Label*) {
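  // The deoptimization entry is reached through the builtin entry table,
  // which is addressable from kRootRegister; the resulting call sequence has
  // a fixed size that is checked below.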
  LoadP(ip, MemOperand(kRootRegister,
                       IsolateData::builtin_entry_slot_offset(target)));
  Call(ip);
  DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
            (kind == DeoptimizeKind::kLazy)
                ? Deoptimizer::kLazyDeoptExitSize
                : Deoptimizer::kNonLazyDeoptExitSize);
  USE(exit, kind);
}

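// The ZeroExt helpers use clrldi, which clears the given number of
// high-order bits: keeping the low 8, 16 or 32 bits zero-extends a byte,
// half word or word respectively.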
void TurboAssembler::ZeroExtByte(Register dst, Register src) {
  clrldi(dst, src, Operand(56));
}

void TurboAssembler::ZeroExtHalfWord(Register dst, Register src) {
  clrldi(dst, src, Operand(48));
}

void TurboAssembler::ZeroExtWord32(Register dst, Register src) {
  clrldi(dst, src, Operand(32));
}

void TurboAssembler::Trap() { stop(); }
void TurboAssembler::DebugBreak() { stop(); }

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64