// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
#define V8_PPC_MACRO_ASSEMBLER_PPC_H_

#include "src/assembler.h"
#include "src/bailout-reason.h"
#include "src/double.h"
#include "src/globals.h"
#include "src/ppc/assembler-ppc.h"
#include "src/turbo-assembler.h"

namespace v8 {
namespace internal {
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = r3;
constexpr Register kReturnRegister1 = r4;
constexpr Register kReturnRegister2 = r5;
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
constexpr Register kSpeculationPoisonRegister = r14;
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
constexpr Register kInterpreterDispatchTableRegister = r17;

constexpr Register kJavaScriptCallArgCountRegister = r3;
constexpr Register kJavaScriptCallCodeStartRegister = r5;
constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
constexpr Register kJavaScriptCallNewTargetRegister = r6;
constexpr Register kJavaScriptCallExtraArg1Register = r5;

constexpr Register kOffHeapTrampolineRegister = ip;
constexpr Register kRuntimeCallFunctionRegister = r4;
constexpr Register kRuntimeCallArgCountRegister = r3;
constexpr Register kRuntimeCallArgvRegister = r5;
constexpr Register kWasmInstanceRegister = r10;

// ----------------------------------------------------------------------------
// Static helper functions

// Generate a MemOperand for loading a field from an object.
inline MemOperand FieldMemOperand(Register object, int offset) {
  return MemOperand(object, offset - kHeapObjectTag);
}
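
// A minimal usage sketch: heap object pointers carry kHeapObjectTag, so
// field accesses go through FieldMemOperand to compensate for the tag.
// Assuming an object pointer in r3 and the conventional map slot offset:
//   __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));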

enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };

Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
                                   Register reg3 = no_reg,
                                   Register reg4 = no_reg,
                                   Register reg5 = no_reg,
                                   Register reg6 = no_reg);

// These exist to provide portability between 32- and 64-bit.
#if V8_TARGET_ARCH_PPC64
#define LoadPX ldx
#define LoadPUX ldux
#define StorePX stdx
#define StorePUX stdux
#define ShiftLeftImm sldi
#define ShiftRightImm srdi
#define ClearLeftImm clrldi
#define ClearRightImm clrrdi
#define ShiftRightArithImm sradi
#define ShiftLeft_ sld
#define ShiftRight_ srd
#define ShiftRightArith srad
#else
#define LoadPX lwzx
#define LoadPUX lwzux
#define StorePX stwx
#define StorePUX stwux
#define ShiftLeftImm slwi
#define ShiftRightImm srwi
#define ClearLeftImm clrlwi
#define ClearRightImm clrrwi
#define ShiftRightArithImm srawi
#define ShiftLeft_ slw
#define ShiftRight_ srw
#define ShiftRightArith sraw
#endif
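
// A short illustration of the aliasing above: pointer-width operations are
// written once against the alias and expand to the doubleword form on PPC64
// and the word form on PPC32 (the registers here are arbitrary):
//   LoadPX(r3, MemOperand(r4, r5));    // ldx on 64-bit, lwzx on 32-bit
//   ShiftLeftImm(r3, r3, Operand(2));  // sldi on 64-bit, slwi on 32-bit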

class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
 public:
  TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int buffer_size,
                 CodeObjectRequired create_code_object)
      : TurboAssemblerBase(isolate, options, buffer, buffer_size,
                           create_code_object) {}

  // Converts the integer (untagged smi) in |src| to a double, storing
  // the result in |dst|.
  void ConvertIntToDouble(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a double, storing the result in |dst|.
  void ConvertUnsignedIntToDouble(Register src, DoubleRegister dst);

  // Converts the integer (untagged smi) in |src| to
  // a float, storing the result in |dst|.
  void ConvertIntToFloat(Register src, DoubleRegister dst);

  // Converts the unsigned integer (untagged smi) in |src| to
  // a float, storing the result in |dst|.
  void ConvertUnsignedIntToFloat(Register src, DoubleRegister dst);

#if V8_TARGET_ARCH_PPC64
  void ConvertInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertInt64ToDouble(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToFloat(Register src, DoubleRegister double_dst);
  void ConvertUnsignedInt64ToDouble(Register src, DoubleRegister double_dst);
#endif

  // Converts the double_input to an integer. Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToInt64(const DoubleRegister double_input,
#if !V8_TARGET_ARCH_PPC64
                            const Register dst_hi,
#endif
                            const Register dst, const DoubleRegister double_dst,
                            FPRoundingMode rounding_mode = kRoundToZero);

#if V8_TARGET_ARCH_PPC64
  // Converts the double_input to an unsigned integer. Note that, upon return,
  // the contents of double_dst will also hold the fixed point representation.
  void ConvertDoubleToUnsignedInt64(
      const DoubleRegister double_input, const Register dst,
      const DoubleRegister double_dst,
      FPRoundingMode rounding_mode = kRoundToZero);
#endif

  // Activation support.
  void EnterFrame(StackFrame::Type type,
                  bool load_constant_pool_pointer_reg = false);

  // Returns the pc offset at which the frame ends.
  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);

  // Push a fixed frame, consisting of lr, fp, constant pool.
  void PushCommonFrame(Register marker_reg = no_reg);

  // Generates function and stub prologue code.
  void StubPrologue(StackFrame::Type type);
  void Prologue();

  // Push a standard frame, consisting of lr, fp, constant pool,
  // context and JS function.
  void PushStandardFrame(Register function_reg);

  // Restore caller's frame pointer and return address prior to being
  // overwritten by tail call stack preparation.
  void RestoreFrameStateForTailCall();

  // Get the actual activation frame alignment for target environment.
  static int ActivationFrameAlignment();

  void InitializeRootRegister() {
    ExternalReference roots_array_start =
        ExternalReference::roots_array_start(isolate());
    mov(kRootRegister, Operand(roots_array_start));
    addi(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
  }

  // These exist to provide portability between 32- and 64-bit.
  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadPU(Register dst, const MemOperand& mem, Register scratch = no_reg);
  void LoadWordArith(Register dst, const MemOperand& mem,
                     Register scratch = no_reg);
  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
  void StorePU(Register src, const MemOperand& mem, Register scratch = no_reg);

  void LoadDouble(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadDoubleLiteral(DoubleRegister result, Double value, Register scratch);

  // Load a literal signed int value <value> into GPR <dst>.
  void LoadIntLiteral(Register dst, int value);
  // Load a Smi value <value> into GPR <dst>.
  void LoadSmiLiteral(Register dst, Smi* smi);

  void LoadSingle(DoubleRegister dst, const MemOperand& mem,
                  Register scratch = no_reg);
  void LoadSingleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);
  void LoadPC(Register dst);
  void ComputeCodeStartAddress(Register dst);

  void StoreDouble(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreDoubleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void StoreSingle(DoubleRegister src, const MemOperand& mem,
                   Register scratch = no_reg);
  void StoreSingleU(DoubleRegister src, const MemOperand& mem,
                    Register scratch = no_reg);

  void Cmpi(Register src1, const Operand& src2, Register scratch,
            CRegister cr = cr7);
  void Cmpli(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  void Cmpwi(Register src1, const Operand& src2, Register scratch,
             CRegister cr = cr7);
  // Set the rounding mode RN in the FPSCR.
  void SetRoundingMode(FPRoundingMode RN);

  // Reset the rounding mode to the default (kRoundToNearest).
  void ResetRoundingMode();
  void Add(Register dst, Register src, intptr_t value, Register scratch);

  void Push(Register src) { push(src); }
  // Push a handle.
  void Push(Handle<HeapObject> handle);
  void Push(Smi* smi);

  // Push two registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2) {
    StorePU(src2, MemOperand(sp, -2 * kPointerSize));
    StoreP(src1, MemOperand(sp, kPointerSize));
  }

  // Push three registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3) {
    StorePU(src3, MemOperand(sp, -3 * kPointerSize));
    StoreP(src2, MemOperand(sp, kPointerSize));
    StoreP(src1, MemOperand(sp, 2 * kPointerSize));
  }

  // Push four registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4) {
    StorePU(src4, MemOperand(sp, -4 * kPointerSize));
    StoreP(src3, MemOperand(sp, kPointerSize));
    StoreP(src2, MemOperand(sp, 2 * kPointerSize));
    StoreP(src1, MemOperand(sp, 3 * kPointerSize));
  }

  // Push five registers. Pushes leftmost register first (to highest address).
  void Push(Register src1, Register src2, Register src3, Register src4,
            Register src5) {
    StorePU(src5, MemOperand(sp, -5 * kPointerSize));
    StoreP(src4, MemOperand(sp, kPointerSize));
    StoreP(src3, MemOperand(sp, 2 * kPointerSize));
    StoreP(src2, MemOperand(sp, 3 * kPointerSize));
    StoreP(src1, MemOperand(sp, 4 * kPointerSize));
  }
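
  // A short sketch of the resulting layout for Push(r3, r4): the single
  // StorePU first decrements sp by two slots and stores the rightmost
  // register, then StoreP fills the higher slot, so afterwards:
  //   sp + kPointerSize -> r3   (leftmost register, highest address)
  //   sp                -> r4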

  void Pop(Register dst) { pop(dst); }

  // Pop two registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2) {
    LoadP(src2, MemOperand(sp, 0));
    LoadP(src1, MemOperand(sp, kPointerSize));
    addi(sp, sp, Operand(2 * kPointerSize));
  }

  // Pop three registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3) {
    LoadP(src3, MemOperand(sp, 0));
    LoadP(src2, MemOperand(sp, kPointerSize));
    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    addi(sp, sp, Operand(3 * kPointerSize));
  }

  // Pop four registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4) {
    LoadP(src4, MemOperand(sp, 0));
    LoadP(src3, MemOperand(sp, kPointerSize));
    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    addi(sp, sp, Operand(4 * kPointerSize));
  }

  // Pop five registers. Pops rightmost register first (from lower address).
  void Pop(Register src1, Register src2, Register src3, Register src4,
           Register src5) {
    LoadP(src5, MemOperand(sp, 0));
    LoadP(src4, MemOperand(sp, kPointerSize));
    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    addi(sp, sp, Operand(5 * kPointerSize));
  }

  void SaveRegisters(RegList registers);
  void RestoreRegisters(RegList registers);

  void CallRecordWriteStub(Register object, Register address,
                           RememberedSetAction remembered_set_action,
                           SaveFPRegsMode fp_mode);

  void MultiPush(RegList regs, Register location = sp);
  void MultiPop(RegList regs, Register location = sp);

  void MultiPushDoubles(RegList dregs, Register location = sp);
  void MultiPopDoubles(RegList dregs, Register location = sp);

  // Calculate how much stack space (in bytes) is required to store caller
  // registers, excluding those specified in the arguments.
  int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
                                      Register exclusion1 = no_reg,
                                      Register exclusion2 = no_reg,
                                      Register exclusion3 = no_reg) const;

  // Push caller saved registers on the stack, and return the number of bytes
  // the stack pointer is adjusted by.
  int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                      Register exclusion2 = no_reg,
                      Register exclusion3 = no_reg);
  // Restore caller saved registers from the stack, and return the number of
  // bytes the stack pointer is adjusted by.
  int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
                     Register exclusion2 = no_reg,
                     Register exclusion3 = no_reg);

  // Load an object from the root table.
  void LoadRoot(Register destination, Heap::RootListIndex index) override {
    LoadRoot(destination, index, al);
  }
  void LoadRoot(Register destination, Heap::RootListIndex index,
                Condition cond);

  void SwapP(Register src, Register dst, Register scratch);
  void SwapP(Register src, MemOperand dst, Register scratch);
  void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
             Register scratch_1);
  void SwapFloat32(DoubleRegister src, DoubleRegister dst,
                   DoubleRegister scratch);
  void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                   DoubleRegister scratch_1);
  void SwapDouble(DoubleRegister src, DoubleRegister dst,
                  DoubleRegister scratch);
  void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
  void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
                  DoubleRegister scratch_1);

  // Before calling a C-function from generated code, align arguments on the
  // stack. After aligning the frame, non-register arguments must be stored in
  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
  // are word sized. If double arguments are used, this function assumes that
  // all double arguments are stored before core registers; otherwise the
  // correct alignment of the double values is not guaranteed.
  // Some compilers/platforms require the stack to be aligned when calling
  // C++ code.
  // Needs a scratch register to do some arithmetic. This register will be
  // trashed.
  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
                            Register scratch);
  void PrepareCallCFunction(int num_reg_arguments, Register scratch);

  // Removes the current frame and its arguments from the stack while
  // preserving the arguments and a return address pushed to the stack for
  // the next call. Both |callee_args_count| and |caller_args_count_reg|
  // exclude the receiver. |callee_args_count| is not modified;
  // |caller_args_count_reg| is trashed.
  void PrepareForTailCall(const ParameterCount& callee_args_count,
                          Register caller_args_count_reg, Register scratch0,
                          Register scratch1);

  // There are two ways of passing double arguments, depending on whether a
  // soft or hard floating point ABI is used. These functions abstract
  // parameter passing for the three different ways we call C functions from
  // generated code.
  void MovToFloatParameter(DoubleRegister src);
  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
  void MovToFloatResult(DoubleRegister src);

  // Calls a C function and cleans up the space for arguments allocated
  // by PrepareCallCFunction. The called function is not allowed to trigger a
  // garbage collection, since that might move the code and invalidate the
  // return address (unless this is somehow accounted for by the called
  // function).
  void CallCFunction(ExternalReference function, int num_arguments);
  void CallCFunction(Register function, int num_arguments);
  void CallCFunction(ExternalReference function, int num_reg_arguments,
                     int num_double_arguments);
  void CallCFunction(Register function, int num_reg_arguments,
                     int num_double_arguments);

  // Call a runtime routine. This expects {centry} to contain a fitting CEntry
  // builtin for the target runtime function and uses an indirect call.
  void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);

  void MovFromFloatParameter(DoubleRegister dst);
  void MovFromFloatResult(DoubleRegister dst);

  // Calls Abort(msg) if the condition cond is not satisfied.
  // Use --debug_code to enable.
  void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Like Assert(), but always enabled.
  void Check(Condition cond, AbortReason reason, CRegister cr = cr7);

  // Print a message to stdout and abort execution.
  void Abort(AbortReason reason);

  inline bool AllowThisStubCall(CodeStub* stub);
#if !V8_TARGET_ARCH_PPC64
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, Register scratch, Register shift);
  void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
                     Register src_high, uint32_t shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, Register scratch, Register shift);
  void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
                      Register src_high, uint32_t shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, Register scratch, Register shift);
  void ShiftRightAlgPair(Register dst_low, Register dst_high, Register src_low,
                         Register src_high, uint32_t shift);
#endif

  void LoadFromConstantsTable(Register destination,
                              int constant_index) override;
  void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
  void LoadRootRelative(Register destination, int32_t offset) override;

  // Jump, Call, and Ret pseudo instructions implementing inter-working.
  void Jump(Register target);
  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
            CRegister cr = cr7);
  void Call(Register target);
  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
            Condition cond = al);
  void Call(Label* target);

  void CallForDeoptimization(Address target, int deopt_id,
                             RelocInfo::Mode rmode) {
    USE(deopt_id);
    Call(target, rmode);
  }

  // Emit code to discard a non-negative number of pointer-sized elements
  // from the stack, clobbering only the sp register.
  void Drop(int count);
  void Drop(Register count, Register scratch = r0);

  void Ret() { blr(); }
  void Ret(Condition cond, CRegister cr = cr7) { bclr(cond, cr); }
  void Ret(int drop) {
    Drop(drop);
    blr();
  }

  // If the value is a NaN, canonicalize the value; otherwise, do nothing.
  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
  void CanonicalizeNaN(const DoubleRegister value) {
    CanonicalizeNaN(value, value);
  }
  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
                     Label* condition_met);

  // Move values between integer and floating point registers.
  void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
  void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
                              Register scratch);
  void MovInt64ToDouble(DoubleRegister dst,
#if !V8_TARGET_ARCH_PPC64
                        Register src_hi,
#endif
                        Register src);
#if V8_TARGET_ARCH_PPC64
  void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
                                  Register src_lo, Register scratch);
#endif
  void InsertDoubleLow(DoubleRegister dst, Register src, Register scratch);
  void InsertDoubleHigh(DoubleRegister dst, Register src, Register scratch);
  void MovDoubleLowToInt(Register dst, DoubleRegister src);
  void MovDoubleHighToInt(Register dst, DoubleRegister src);
  void MovDoubleToInt64(
#if !V8_TARGET_ARCH_PPC64
      Register dst_hi,
#endif
      Register dst, DoubleRegister src);
  void MovIntToFloat(DoubleRegister dst, Register src);
  void MovFloatToInt(Register dst, DoubleRegister src);
  // Register move. May do nothing if the registers are identical.
  void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
  void Move(Register dst, Handle<HeapObject> value);
  void Move(Register dst, ExternalReference reference);
  void Move(Register dst, Register src, Condition cond = al);
  void Move(DoubleRegister dst, DoubleRegister src);

  void SmiUntag(Register reg, RCBit rc = LeaveRC, int scale = 0) {
    SmiUntag(reg, reg, rc, scale);
  }

  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC, int scale = 0) {
    if (scale > kSmiShift) {
      ShiftLeftImm(dst, src, Operand(scale - kSmiShift), rc);
    } else if (scale < kSmiShift) {
      ShiftRightArithImm(dst, src, kSmiShift - scale, rc);
    } else {
      // do nothing
    }
  }
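
  // A worked example of the scale logic above, assuming 64-bit V8 where
  // kSmiShift == 32: SmiUntag(dst, src, LeaveRC, kPointerSizeLog2) wants the
  // untagged value pre-scaled by 8, i.e. a net arithmetic right shift of
  // 32 - 3 == 29 bits, so ShiftRightArithImm(dst, src, 29) is emitted; a
  // scale greater than kSmiShift would flip this into a left shift.
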
  // ---------------------------------------------------------------------------
  // Bit testing/extraction
  //
  // Bit numbering is such that the least significant bit is bit 0
  // (for consistency between 32/64-bit).

  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
  // and, if !test, shift them into the least significant bits of dst.
  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
                              int rangeEnd, RCBit rc = LeaveRC,
                              bool test = false) {
    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
    int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
    int width = rangeStart - rangeEnd + 1;
    if (rc == SetRC && rangeStart < 16 && (rangeEnd == 0 || test)) {
      // Prefer faster andi when applicable.
      andi(dst, src, Operand(((1 << width) - 1) << rangeEnd));
    } else {
#if V8_TARGET_ARCH_PPC64
      rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
#else
      rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1,
             rc);
#endif
    }
  }
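
  // A worked example of the rotate-and-mask path: ExtractBitRange(dst, src,
  // 7, 3) isolates bits 7..3. With rangeEnd == 3 the rotate amount is
  // kBitsPerPointer - 3 (rotating left so bit 3 lands in bit 0) and the
  // width is 5, so on PPC64 this emits
  //   rldicl(dst, src, 61, 59);  // rotate left 61, keep the low 5 bits
  // leaving the field right-justified in dst.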

  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
                         RCBit rc = LeaveRC, bool test = false) {
    ExtractBitRange(dst, src, bitNumber, bitNumber, rc, test);
  }

  // Extract consecutive bits (defined by mask) from src and place them
  // into the least significant bits of dst.
  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
                             RCBit rc = LeaveRC, bool test = false) {
    int start = kBitsPerPointer - 1;
    int end;
    uintptr_t bit = (1L << start);

    while (bit && (mask & bit) == 0) {
      start--;
      bit >>= 1;
    }
    end = start;
    bit >>= 1;

    while (bit && (mask & bit)) {
      end--;
      bit >>= 1;
    }

    // 1-bits in mask must be contiguous.
    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);

    ExtractBitRange(dst, src, start, end, rc, test);
  }
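
  // For instance, ExtractBitMask(dst, src, 0xF0) scans down from the top
  // bit, finds start == 7 and end == 4, and delegates to
  // ExtractBitRange(dst, src, 7, 4); the DCHECK above rejects masks whose
  // 1-bits are not contiguous, such as 0x90.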

  // Test single bit in value.
  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC, true);
  }

  // Test consecutive bit range in value. Range is defined by mask.
  inline void TestBitMask(Register value, uintptr_t mask,
                          Register scratch = r0) {
    ExtractBitMask(scratch, value, mask, SetRC, true);
  }
  // Test consecutive bit range in value. Range is defined by
  // rangeStart - rangeEnd.
  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
                           Register scratch = r0) {
    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC, true);
  }

  inline void TestIfSmi(Register value, Register scratch) {
    TestBitRange(value, kSmiTagSize - 1, 0, scratch);
  }
  // Jump if the register contains a smi.
  inline void JumpIfSmi(Register value, Label* smi_label) {
    TestIfSmi(value, r0);
    beq(smi_label, cr0);  // branch if SMI
  }
  void JumpIfEqual(Register x, int32_t y, Label* dest);
  void JumpIfLessThan(Register x, int32_t y, Label* dest);

#if V8_TARGET_ARCH_PPC64
  inline void TestIfInt32(Register value, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer.
    extsw(scratch, value);
    cmp(scratch, value, cr);
  }
#else
  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
                          CRegister cr = cr7) {
    // High bits must be identical to fit into a 32-bit integer.
    srawi(scratch, lo_word, 31);
    cmp(scratch, hi_word, cr);
  }
#endif

  // Overflow handling functions.
  // Usage: call the appropriate arithmetic function and then call one of the
  // flow control functions with the corresponding label.

  // Compute dst = left + right, setting condition codes. dst may be the same
  // as either left or right (or a unique register). left and right must not
  // be the same register.
  void AddAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);
  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
                              Register overflow_dst, Register scratch = r0);

  // Compute dst = left - right, setting condition codes. dst may be the same
  // as either left or right (or a unique register). left and right must not
  // be the same register.
  void SubAndCheckForOverflow(Register dst, Register left, Register right,
                              Register overflow_dst, Register scratch = r0);

  // Performs a truncating conversion of a floating point number as used by
  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if
  // it succeeds; otherwise falls through with the result saturated. On
  // return, 'result' either holds the answer or is clobbered on fall through.
  //
  // Only public for the test code in test-code-stubs-arm.cc.
  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
                                  Label* done);
  void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
                         DoubleRegister double_input, StubCallMode stub_mode);

  // Call a code stub.
  void CallStubDelayed(CodeStub* stub);

  void LoadConstantPoolPointerRegister();

  // Loads the constant pool pointer (kConstantPoolRegister).
  void LoadConstantPoolPointerRegisterFromCodeTargetAddress(
      Register code_target_address);
  void AbortConstantPoolBuilding() {
#ifdef DEBUG
    // Avoid DCHECK(!is_linked()) failure in ~Label()
    bind(ConstantPoolPosition());
#endif
  }

  void ResetSpeculationPoisonRegister();

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  int CalculateStackPassedWords(int num_reg_arguments,
                                int num_double_arguments);
  void CallCFunctionHelper(Register function, int num_reg_arguments,
                           int num_double_arguments);
};

// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler : public TurboAssembler {
 public:
  MacroAssembler(Isolate* isolate, void* buffer, int size,
                 CodeObjectRequired create_code_object)
      : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
                       size, create_code_object) {}
  MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
                 void* buffer, int size, CodeObjectRequired create_code_object);

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                           Register address);

  void JumpToJSEntry(Register target);
  // Check if object is in new space. Jumps if the object is not in new space.
  // The register scratch can be object itself, but scratch will be clobbered.
  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, eq, branch);
  }

  // Check if object is in new space. Jumps if the object is in new space.
  // The register scratch can be object itself, but it will be clobbered.
  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
    InNewSpace(object, scratch, ne, branch);
  }

  // Check if an object has a given incremental marking color.
  void HasColor(Register object, Register scratch0, Register scratch1,
                Label* has_color, int first_bit, int second_bit);

  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
                   Label* on_black);

  // Checks the color of an object. If the object is white we jump to the
  // incremental marker.
  void JumpIfWhite(Register value, Register scratch1, Register scratch2,
                   Register scratch3, Label* value_is_white);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
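
  // A hedged usage sketch of the write barrier above (the field offset and
  // register choices are illustrative, not prescribed by this header):
  // after storing a heap pointer into an object field, emit
  //   __ StoreP(value, FieldMemOperand(object, JSObject::kElementsOffset), r0);
  //   __ RecordWriteField(object, JSObject::kElementsOffset, value, scratch,
  //                       kLRHasNotBeenSaved, kDontSaveFPRegs);
  // so the incremental marker and remembered set see the new reference.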

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  // Enter exit frame.
  // stack_space - extra stack space, used for parameters before call to C.
  // At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r0.
  // Expects the number of values to remove (pushed prior to the exit frame)
  // to be passed in a register, or no_reg if there is nothing to remove.
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  void LoadNativeContextSlot(int index, Register dst);

  // ----------------------------------------------------------------
  // New PPC macro-assembler interfaces that are slightly higher level
  // than assembler-ppc and may generate variable-length sequences.

  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
  void StoreWord(Register src, const MemOperand& mem, Register scratch);

  void LoadHalfWord(Register dst, const MemOperand& mem,
                    Register scratch = no_reg);
  void LoadHalfWordArith(Register dst, const MemOperand& mem,
                         Register scratch = no_reg);
  void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);

  void LoadByte(Register dst, const MemOperand& mem, Register scratch);
  void StoreByte(Register src, const MemOperand& mem, Register scratch);

  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);
  void LoadDoubleU(DoubleRegister dst, const MemOperand& mem,
                   Register scratch = no_reg);

  void Cmplwi(Register src1, const Operand& src2, Register scratch,
              CRegister cr = cr7);
  void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
  void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);

  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
  void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
                     CRegister cr = cr7);
  void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
                      CRegister cr = cr7);
  void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
                     RCBit rc = LeaveRC);

  // ---------------------------------------------------------------------------
  // JavaScript invokes

  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  void DebugBreak();
  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // ---------------------------------------------------------------------------
  // Support functions.

  // Compare object type for heap object. heap_object contains a non-Smi
  // whose object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  // It leaves the map in the map register (unless the type_reg and map register
  // are the same register). It leaves the heap object in the heap_object
  // register unless the heap_object register is the same register as one of the
  // other registers.
  // type_reg can be no_reg. In that case ip is used.
  void CompareObjectType(Register heap_object, Register map, Register type_reg,
                         InstanceType type);

  // Compare instance type in a map. map contains a valid map object whose
  // object type should be compared with the given type. This both
  // sets the flags and leaves the object type in the type_reg register.
  void CompareInstanceType(Register map, Register type_reg, InstanceType type);

  // Compare the object in a register to a value from the root list.
  // Uses the ip register as scratch.
  void CompareRoot(Register obj, Heap::RootListIndex index);
  void PushRoot(Heap::RootListIndex index) {
    LoadRoot(r0, index);
    Push(r0);
  }

  // Compare the object in a register to a value and jump if they are equal.
  void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
    CompareRoot(with, index);
    beq(if_equal);
  }

  // Compare the object in a register to a value and jump if they are not equal.
  void JumpIfNotRoot(Register with, Heap::RootListIndex index,
                     Label* if_not_equal) {
    CompareRoot(with, index);
    bne(if_not_equal);
  }

  // Try to convert a double to a signed 32-bit integer.
  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
                             Register scratch, DoubleRegister double_scratch);

  // ---------------------------------------------------------------------------
  // Runtime calls

  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);
  void CallJSEntry(Register target);

  // Call a code stub.
  void CallStub(CodeStub* stub, Condition cond = al);
  void TailCallStub(CodeStub* stub, Condition cond = al);

  // Call a runtime routine.
  void CallRuntime(const Runtime::Function* f, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
  void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, kSaveFPRegs);
  }

  // Convenience function: Same as above, but takes the fid instead.
  void CallRuntime(Runtime::FunctionId fid,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    const Runtime::Function* function = Runtime::FunctionForId(fid);
    CallRuntime(function, function->nargs, save_doubles);
  }

  // Convenience function: Same as above, but takes the fid and an explicit
  // argument count instead.
  void CallRuntime(Runtime::FunctionId fid, int num_arguments,
                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
    CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
  }

  // Convenience function: tail call a runtime routine (jump).
  void TailCallRuntime(Runtime::FunctionId fid);

  // Jump to a runtime routine.
  void JumpToExternalReference(const ExternalReference& builtin,
                               bool builtin_exit_frame = false);

  // Generates a trampoline to jump to the off-heap instruction stream.
  void JumpToInstructionStream(Address entry);

  // ---------------------------------------------------------------------------
  // In-place weak references.
  void LoadWeakValue(Register out, Register in, Label* target_if_cleared);

  // ---------------------------------------------------------------------------
  // StatsCounter support

  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);
  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
                        Register scratch2);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift.
  void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
  void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
  }
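
  // For example, on 64-bit V8 (kSmiShift == 32) SmiTag(r4, r5) emits
  // ShiftLeftImm(r4, r5, Operand(32)), leaving the payload in the upper
  // word with kSmiTag == 0 in the low bits.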

  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_PPC64
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }
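
  // A worked example: on PPC64 (kSmiShift == 32, kPointerSizeLog2 == 3) a
  // smi index becomes a byte offset via an arithmetic right shift of
  // 32 - 3 == 29; on PPC32 (kSmiShift == 1, kPointerSizeLog2 == 2) a left
  // shift of 1 is emitted instead.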

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value, r0);
    bne(not_smi_label, cr0);
  }
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

#if V8_TARGET_ARCH_PPC64
  // Ensure it is permissible to read/write int value directly from
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
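
// Illustration of SmiWordOffset: on little-endian PPC64 the 32-bit smi
// payload occupies the upper half of the 64-bit word, i.e. the higher
// address, so the offset is bumped by kPointerSize / 2 == 4 bytes; on
// big-endian targets the payload word already sits at the original offset.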

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  // ---------------------------------------------------------------------------
  // Patching helpers.

  template <typename Field>
  void DecodeField(Register dst, Register src, RCBit rc = LeaveRC) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift,
                    rc);
  }

  template <typename Field>
  void DecodeField(Register reg, RCBit rc = LeaveRC) {
    DecodeField<Field>(reg, reg, rc);
  }
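
  // A small sketch of the template above: for a BitField type such as
  // Map::ElementsKindBits (any class exposing kShift and kSize works),
  //   DecodeField<Map::ElementsKindBits>(r4, r3);
  // extracts that field from r3 into the low bits of r4 via ExtractBitRange.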

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;

  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
  void InNewSpace(Register object, Register scratch,
                  Condition cond, // eq for new space, ne otherwise.
                  Label* branch);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
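
// For example, loading a slot from the native context takes two steps, with
// the register choices here purely illustrative:
//   __ LoadP(dst, NativeContextMemOperand(), r0);
//   __ LoadP(dst, ContextMemOperand(dst, index), r0);
// i.e. fetch the native context through cp, then index into its slots.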

#define ACCESS_MASM(masm) masm->

} // namespace internal
} // namespace v8

#endif // V8_PPC_MACRO_ASSEMBLER_PPC_H_