1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 // The original source code covered by the above license has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36
37 // A light-weight PPC Assembler
38 // Generates user mode instructions for the PPC architecture.
39
40 #ifndef V8_PPC_ASSEMBLER_PPC_H_
41 #define V8_PPC_ASSEMBLER_PPC_H_
42
43 #include <stdio.h>
44 #include <vector>
45
46 #include "src/assembler.h"
47 #include "src/double.h"
48 #include "src/ppc/constants-ppc.h"
49
50 #if V8_HOST_ARCH_PPC && \
51 (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN))
52 #define ABI_USES_FUNCTION_DESCRIPTORS 1
53 #else
54 #define ABI_USES_FUNCTION_DESCRIPTORS 0
55 #endif
56
57 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
58 #define ABI_PASSES_HANDLES_IN_REGS 1
59 #else
60 #define ABI_PASSES_HANDLES_IN_REGS 0
61 #endif
62
63 #if !V8_HOST_ARCH_PPC || !V8_TARGET_ARCH_PPC64 || V8_TARGET_LITTLE_ENDIAN
64 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 1
65 #else
66 #define ABI_RETURNS_OBJECT_PAIRS_IN_REGS 0
67 #endif
68
69 #if !V8_HOST_ARCH_PPC || (V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
70 #define ABI_CALL_VIA_IP 1
71 #else
72 #define ABI_CALL_VIA_IP 0
73 #endif
74
75 #if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
76 #define ABI_TOC_REGISTER 2
77 #else
78 #define ABI_TOC_REGISTER 13
79 #endif
80
81 #define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
82
83 namespace v8 {
84 namespace internal {
85
86 // clang-format off
87 #define GENERAL_REGISTERS(V) \
88 V(r0) V(sp) V(r2) V(r3) V(r4) V(r5) V(r6) V(r7) \
89 V(r8) V(r9) V(r10) V(r11) V(ip) V(r13) V(r14) V(r15) \
90 V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
91 V(r24) V(r25) V(r26) V(r27) V(r28) V(r29) V(r30) V(fp)
92
93 #if V8_EMBEDDED_CONSTANT_POOL
94 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
95 V(r3) V(r4) V(r5) V(r6) V(r7) \
96 V(r8) V(r9) V(r10) V(r14) V(r15) \
97 V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
98 V(r24) V(r25) V(r26) V(r27) V(r30)
99 #else
100 #define ALLOCATABLE_GENERAL_REGISTERS(V) \
101 V(r3) V(r4) V(r5) V(r6) V(r7) \
102 V(r8) V(r9) V(r10) V(r14) V(r15) \
103 V(r16) V(r17) V(r18) V(r19) V(r20) V(r21) V(r22) V(r23) \
104 V(r24) V(r25) V(r26) V(r27) V(r28) V(r30)
105 #endif
106
107 #define LOW_DOUBLE_REGISTERS(V) \
108 V(d0) V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
109 V(d8) V(d9) V(d10) V(d11) V(d12) V(d13) V(d14) V(d15)
110
111 #define NON_LOW_DOUBLE_REGISTERS(V) \
112 V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
113 V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
114
115 #define DOUBLE_REGISTERS(V) \
116 LOW_DOUBLE_REGISTERS(V) NON_LOW_DOUBLE_REGISTERS(V)
117
118 #define FLOAT_REGISTERS DOUBLE_REGISTERS
119 #define SIMD128_REGISTERS DOUBLE_REGISTERS
120
121 #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
122 V(d1) V(d2) V(d3) V(d4) V(d5) V(d6) V(d7) \
123 V(d8) V(d9) V(d10) V(d11) V(d12) V(d15) \
124 V(d16) V(d17) V(d18) V(d19) V(d20) V(d21) V(d22) V(d23) \
125 V(d24) V(d25) V(d26) V(d27) V(d28) V(d29) V(d30) V(d31)
126
127 #define C_REGISTERS(V) \
128 V(cr0) V(cr1) V(cr2) V(cr3) V(cr4) V(cr5) V(cr6) V(cr7) \
129 V(cr8) V(cr9) V(cr10) V(cr11) V(cr12) V(cr15)
130 // clang-format on
131
132 // Register list in load/store instructions
133 // Note that the bit values must match those used in actual instruction encoding
134 const int kNumRegs = 32;
135
136 // Caller-saved/arguments registers
137 const RegList kJSCallerSaved = 1 << 3 | // r3 a1
138 1 << 4 | // r4 a2
139 1 << 5 | // r5 a3
140 1 << 6 | // r6 a4
141 1 << 7 | // r7 a5
142 1 << 8 | // r8 a6
143 1 << 9 | // r9 a7
144 1 << 10 | // r10 a8
145 1 << 11; // r11
146
147 const int kNumJSCallerSaved = 9;
148
149 // Return the code of the n-th caller-saved register available to JavaScript
150 // e.g. JSCallerSavedCode(0) returns r3.code() == 3
151 int JSCallerSavedCode(int n);
152
153 // Callee-saved registers preserved when switching from C to JavaScript
154 const RegList kCalleeSaved = 1 << 14 | // r14
155 1 << 15 | // r15
156 1 << 16 | // r16
157 1 << 17 | // r17
158 1 << 18 | // r18
159 1 << 19 | // r19
160 1 << 20 | // r20
161 1 << 21 | // r21
162 1 << 22 | // r22
163 1 << 23 | // r23
164 1 << 24 | // r24
165 1 << 25 | // r25
166 1 << 26 | // r26
167 1 << 27 | // r27
168 1 << 28 | // r28
169 1 << 29 | // r29
170 1 << 30 | // r30
171 1 << 31; // r31
172
173 const int kNumCalleeSaved = 18;
174
175 const RegList kCallerSavedDoubles = 1 << 0 | // d0
176 1 << 1 | // d1
177 1 << 2 | // d2
178 1 << 3 | // d3
179 1 << 4 | // d4
180 1 << 5 | // d5
181 1 << 6 | // d6
182 1 << 7 | // d7
183 1 << 8 | // d8
184 1 << 9 | // d9
185 1 << 10 | // d10
186 1 << 11 | // d11
187 1 << 12 | // d12
188 1 << 13; // d13
189
190 const int kNumCallerSavedDoubles = 14;
191
192 const RegList kCalleeSavedDoubles = 1 << 14 | // d14
193 1 << 15 | // d15
194 1 << 16 | // d16
195 1 << 17 | // d17
196 1 << 18 | // d18
197 1 << 19 | // d19
198 1 << 20 | // d20
199 1 << 21 | // d21
200 1 << 22 | // d22
201 1 << 23 | // d23
202 1 << 24 | // d24
203 1 << 25 | // d25
204 1 << 26 | // d26
205 1 << 27 | // d27
206 1 << 28 | // d28
207 1 << 29 | // d29
208 1 << 30 | // d30
209 1 << 31; // d31
210
211 const int kNumCalleeSavedDoubles = 18;
212
213 // Number of registers for which space is reserved in safepoints. Must be a
214 // multiple of 8.
215 const int kNumSafepointRegisters = 32;
216
217 // The following constants describe the stack frame linkage area as
218 // defined by the ABI. Note that kNumRequiredStackFrameSlots must
219 // satisfy alignment requirements (rounding up if required).
220 #if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
221 // [0] back chain
222 // [1] condition register save area
223 // [2] link register save area
224 // [3] TOC save area
225 // [4] Parameter1 save area
226 // ...
227 // [11] Parameter8 save area
228 // [12] Parameter9 slot (if necessary)
229 // ...
230 const int kNumRequiredStackFrameSlots = 12;
231 const int kStackFrameLRSlot = 2;
232 const int kStackFrameExtraParamSlot = 12;
233 #elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
234 // [0] back chain
235 // [1] condition register save area
236 // [2] link register save area
237 // [3] reserved for compiler
238 // [4] reserved by binder
239 // [5] TOC save area
240 // [6] Parameter1 save area
241 // ...
242 // [13] Parameter8 save area
243 // [14] Parameter9 slot (if necessary)
244 // ...
245 #if V8_TARGET_ARCH_PPC64
246 const int kNumRequiredStackFrameSlots = 14;
247 #else
248 const int kNumRequiredStackFrameSlots = 16;
249 #endif
250 const int kStackFrameLRSlot = 2;
251 const int kStackFrameExtraParamSlot = 14;
252 #else
253 // [0] back chain
254 // [1] link register save area
255 // [2] Parameter9 slot (if necessary)
256 // ...
257 const int kNumRequiredStackFrameSlots = 4;
258 const int kStackFrameLRSlot = 1;
259 const int kStackFrameExtraParamSlot = 2;
260 #endif
261
262 // Define the list of registers actually saved at safepoints.
263 // Note that the number of saved registers may be smaller than the reserved
264 // space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
265 const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
266 const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
267
268 enum RegisterCode {
269 #define REGISTER_CODE(R) kRegCode_##R,
270 GENERAL_REGISTERS(REGISTER_CODE)
271 #undef REGISTER_CODE
272 kRegAfterLast
273 };
274
275 class Register : public RegisterBase<Register, kRegAfterLast> {
276 public:
277 #if V8_TARGET_LITTLE_ENDIAN
278 static constexpr int kMantissaOffset = 0;
279 static constexpr int kExponentOffset = 4;
280 #else
281 static constexpr int kMantissaOffset = 4;
282 static constexpr int kExponentOffset = 0;
283 #endif
284
285 private:
286 friend class RegisterBase;
287 explicit constexpr Register(int code) : RegisterBase(code) {}
288 };
289
290 ASSERT_TRIVIALLY_COPYABLE(Register);
291 static_assert(sizeof(Register) == sizeof(int),
292 "Register can efficiently be passed by value");
293
294 #define DEFINE_REGISTER(R) \
295 constexpr Register R = Register::from_code<kRegCode_##R>();
296 GENERAL_REGISTERS(DEFINE_REGISTER)
297 #undef DEFINE_REGISTER
298 constexpr Register no_reg = Register::no_reg();
299
300 // Aliases
301 constexpr Register kConstantPoolRegister = r28; // Constant pool.
302 constexpr Register kRootRegister = r29; // Roots array pointer.
303 constexpr Register cp = r30; // JavaScript context pointer.
304
305 constexpr bool kPadArguments = false;
306 constexpr bool kSimpleFPAliasing = true;
307 constexpr bool kSimdMaskRegisters = false;
308
309 enum DoubleRegisterCode {
310 #define REGISTER_CODE(R) kDoubleCode_##R,
311 DOUBLE_REGISTERS(REGISTER_CODE)
312 #undef REGISTER_CODE
313 kDoubleAfterLast
314 };
315
316 // Double word FP register.
317 class DoubleRegister : public RegisterBase<DoubleRegister, kDoubleAfterLast> {
318 public:
319 // A few double registers are reserved: one as a scratch register and one to
320 // hold 0.0, since PPC floating-point instructions have no immediate form.
321 // d14: 0.0
322 // d13: scratch register.
323 static constexpr int kSizeInBytes = 8;
324 inline static int NumRegisters();
325
326 private:
327 friend class RegisterBase;
328 explicit constexpr DoubleRegister(int code) : RegisterBase(code) {}
329 };
330
331 ASSERT_TRIVIALLY_COPYABLE(DoubleRegister);
332 static_assert(sizeof(DoubleRegister) == sizeof(int),
333 "DoubleRegister can efficiently be passed by value");
334
335 typedef DoubleRegister FloatRegister;
336
337 // TODO(ppc) Define SIMD registers.
338 typedef DoubleRegister Simd128Register;
339
340 #define DEFINE_REGISTER(R) \
341 constexpr DoubleRegister R = DoubleRegister::from_code<kDoubleCode_##R>();
342 DOUBLE_REGISTERS(DEFINE_REGISTER)
343 #undef DEFINE_REGISTER
344 constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
345
346 constexpr DoubleRegister kFirstCalleeSavedDoubleReg = d14;
347 constexpr DoubleRegister kLastCalleeSavedDoubleReg = d31;
348 constexpr DoubleRegister kDoubleRegZero = d14;
349 constexpr DoubleRegister kScratchDoubleReg = d13;
350
351 Register ToRegister(int num);
352
353 enum CRegisterCode {
354 #define REGISTER_CODE(R) kCCode_##R,
355 C_REGISTERS(REGISTER_CODE)
356 #undef REGISTER_CODE
357 kCAfterLast
358 };
359
360 // Condition register
361 class CRegister : public RegisterBase<CRegister, kCAfterLast> {
362 friend class RegisterBase;
363 explicit constexpr CRegister(int code) : RegisterBase(code) {}
364 };
365
366 constexpr CRegister no_creg = CRegister::no_reg();
367 #define DECLARE_C_REGISTER(R) \
368 constexpr CRegister R = CRegister::from_code<kCCode_##R>();
369 C_REGISTERS(DECLARE_C_REGISTER)
370 #undef DECLARE_C_REGISTER
371
372 // -----------------------------------------------------------------------------
373 // Machine instruction Operands
374
375 // Class Operand represents an immediate value or a register operand in data
376 // processing instructions
376 class Operand BASE_EMBEDDED {
377 public:
378 // immediate
379 V8_INLINE explicit Operand(intptr_t immediate,
380 RelocInfo::Mode rmode = RelocInfo::NONE)
381 : rmode_(rmode) {
382 value_.immediate = immediate;
383 }
384 V8_INLINE static Operand Zero() { return Operand(static_cast<intptr_t>(0)); }
385 V8_INLINE explicit Operand(const ExternalReference& f)
386 : rmode_(RelocInfo::EXTERNAL_REFERENCE) {
387 value_.immediate = static_cast<intptr_t>(f.address());
388 }
389 explicit Operand(Handle<HeapObject> handle);
390 V8_INLINE explicit Operand(Smi* value) : rmode_(RelocInfo::NONE) {
391 value_.immediate = reinterpret_cast<intptr_t>(value);
392 }
393 // rm
394 V8_INLINE explicit Operand(Register rm);
395
396 static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
397 static Operand EmbeddedCode(CodeStub* stub);
398
399 // Return true if this is a register operand.
400 V8_INLINE bool is_reg() const { return rm_.is_valid(); }
401
402 bool must_output_reloc_info(const Assembler* assembler) const;
403
404 inline intptr_t immediate() const {
405 DCHECK(IsImmediate());
406 DCHECK(!IsHeapObjectRequest());
407 return value_.immediate;
408 }
409 bool IsImmediate() const { return !rm_.is_valid(); }
410
411 HeapObjectRequest heap_object_request() const {
412 DCHECK(IsHeapObjectRequest());
413 return value_.heap_object_request;
414 }
415
416 Register rm() const { return rm_; }
417
418 bool IsHeapObjectRequest() const {
419 DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
420 DCHECK_IMPLIES(is_heap_object_request_,
421 rmode_ == RelocInfo::EMBEDDED_OBJECT ||
422 rmode_ == RelocInfo::CODE_TARGET);
423 return is_heap_object_request_;
424 }
425
426 private:
427 Register rm_ = no_reg;
428 union Value {
429 Value() {}
430 HeapObjectRequest heap_object_request; // if is_heap_object_request_
431 intptr_t immediate; // otherwise
432 } value_; // valid if rm_ == no_reg
433 bool is_heap_object_request_ = false;
434
435 RelocInfo::Mode rmode_;
436
437 friend class Assembler;
438 friend class MacroAssembler;
439 };
440
441
442 // Class MemOperand represents a memory operand in load and store instructions
443 // On PowerPC a memory operand is either a base register plus a 16bit signed
444 // displacement, or a base register plus an index register (rb)
445 class MemOperand BASE_EMBEDDED {
446 public:
447 explicit MemOperand(Register rn, int32_t offset = 0);
448
449 explicit MemOperand(Register ra, Register rb);
450
451 int32_t offset() const {
452 return offset_;
453 }
454
455 // PowerPC - base register
456 Register ra() const {
457 return ra_;
458 }
459
460 Register rb() const {
461 return rb_;
462 }
463
464 private:
465 Register ra_; // base
466 int32_t offset_; // offset
467 Register rb_; // index
468
469 friend class Assembler;
470 };
471
472
473 class DeferredRelocInfo {
474 public:
475 DeferredRelocInfo() {}
476 DeferredRelocInfo(int position, RelocInfo::Mode rmode, intptr_t data)
477 : position_(position), rmode_(rmode), data_(data) {}
478
479 int position() const { return position_; }
480 RelocInfo::Mode rmode() const { return rmode_; }
481 intptr_t data() const { return data_; }
482
483 private:
484 int position_;
485 RelocInfo::Mode rmode_;
486 intptr_t data_;
487 };
488
489
490 class Assembler : public AssemblerBase {
491 public:
492 // Create an assembler. Instructions and relocation information are emitted
493 // into a buffer, with the instructions starting from the beginning and the
494 // relocation information starting from the end of the buffer. See CodeDesc
495 // for a detailed comment on the layout (globals.h).
496 //
497 // If the provided buffer is nullptr, the assembler allocates and grows its
498 // own buffer, and buffer_size determines the initial buffer size. The buffer
499 // is owned by the assembler and deallocated upon destruction of the
500 // assembler.
501 //
502 // If the provided buffer is not nullptr, the assembler uses the provided
503 // buffer for code generation and assumes its size to be buffer_size. If the
504 // buffer is too small, a fatal error occurs. No deallocation of the buffer is
505 // done upon destruction of the assembler.
506 Assembler(const AssemblerOptions& options, void* buffer, int buffer_size);
507 virtual ~Assembler() {}
508
509 // GetCode emits any pending (non-emitted) code and fills the descriptor
510 // desc. GetCode() is idempotent; it returns the same result if no other
511 // Assembler functions are invoked in between GetCode() calls.
512 void GetCode(Isolate* isolate, CodeDesc* desc);
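// A minimal usage sketch (editorial addition, not from the original header):
// the AssemblerOptions setup and the isolate are assumed to be provided by
// the embedder.
//
//   Assembler assm(AssemblerOptions{}, nullptr, 256);  // assembler-owned buffer
//   assm.mov(r3, Operand(42));
//   assm.blr();
//   CodeDesc desc;
//   assm.GetCode(isolate, &desc);  // emits pending code and fills desc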
513
514 // Label operations & relative jumps (PPUM Appendix D)
515 //
516 // Takes a branch opcode (cc) and a label (L) and generates
517 // either a backward branch or a forward branch and links it
518 // to the label fixup chain. Usage:
519 //
520 // Label L; // unbound label
521 // j(cc, &L); // forward branch to unbound label
522 // bind(&L); // bind label to the current pc
523 // j(cc, &L); // backward branch to bound label
524 // bind(&L); // illegal: a label may be bound only once
525 //
526 // Note: The same Label can be used for forward and backward branches
527 // but it may be bound only once.
528
529 void bind(Label* L); // binds an unbound label L to the current code position
530
531 // Links a label at the current pc_offset(). If already bound, returns the
532 // bound position. If already linked, returns the position of the prior link.
533 // Otherwise, returns the current pc_offset().
534 int link(Label* L);
535
536 // Determines if Label is bound and near enough so that a single
537 // branch instruction can be used to reach it.
538 bool is_near(Label* L, Condition cond);
539
540 // Returns the branch offset to the given label from the current code position
541 // Links the label to the current position if it is still unbound
542 int branch_offset(Label* L) {
543 if (L->is_unused() && !trampoline_emitted_) {
544 TrackBranch();
545 }
546 return link(L) - pc_offset();
547 }
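// For example, a label bound three instructions (12 bytes) before the current
// position yields a branch offset of -12.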
548
549 // Puts a labels target address at the given position.
550 // The high 8 bits are set to zero.
551 void label_at_put(Label* L, int at_offset);
552
553 V8_INLINE static bool IsConstantPoolLoadStart(
554 Address pc, ConstantPoolEntry::Access* access = nullptr);
555 V8_INLINE static bool IsConstantPoolLoadEnd(
556 Address pc, ConstantPoolEntry::Access* access = nullptr);
557 V8_INLINE static int GetConstantPoolOffset(Address pc,
558 ConstantPoolEntry::Access access,
559 ConstantPoolEntry::Type type);
560 V8_INLINE void PatchConstantPoolAccessInstruction(
561 int pc_offset, int offset, ConstantPoolEntry::Access access,
562 ConstantPoolEntry::Type type);
563
564 // Return the address in the constant pool of the code target address used by
565 // the branch/call instruction at pc, or the object in a mov.
566 V8_INLINE static Address target_constant_pool_address_at(
567 Address pc, Address constant_pool, ConstantPoolEntry::Access access,
568 ConstantPoolEntry::Type type);
569
570 // Read/Modify the code target address in the branch/call instruction at pc.
571 // The isolate argument is unused (and may be nullptr) when skipping flushing.
572 V8_INLINE static Address target_address_at(Address pc, Address constant_pool);
573 V8_INLINE static void set_target_address_at(
574 Address pc, Address constant_pool, Address target,
575 ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
576
577 // Return the code target address at a call site from the return address
578 // of that call in the instruction stream.
579 inline static Address target_address_from_return_address(Address pc);
580
581 // Given the address of the beginning of a call, return the address
582 // in the instruction stream that the call will return to.
583 V8_INLINE static Address return_address_from_call_start(Address pc);
584
585 // This sets the branch destination.
586 // This is for calls and branches within generated code.
587 inline static void deserialization_set_special_target_at(
588 Address instruction_payload, Code* code, Address target);
589
590 // Get the size of the special target encoded at 'instruction_payload'.
591 inline static int deserialization_special_target_size(
592 Address instruction_payload);
593
594 // This sets the internal reference at the pc.
595 inline static void deserialization_set_target_internal_reference_at(
596 Address pc, Address target,
597 RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
598
599 // Here we are patching the address in the lis/ori instruction sequence.
600 // These values are used in the serialization process and must be zero for
601 // PPC platform, as Code, Embedded Object or External-reference pointers
602 // are split across two consecutive instructions and don't exist separately
603 // in the code, so the serializer should not step forwards in memory after
604 // a target is resolved and written.
605 static constexpr int kSpecialTargetSize = 0;
606
607 // Number of instructions to load an address via a mov sequence.
608 #if V8_TARGET_ARCH_PPC64
609 static constexpr int kMovInstructionsConstantPool = 1;
610 static constexpr int kMovInstructionsNoConstantPool = 5;
611 #if defined(V8_PPC_TAGGING_OPT)
612 static constexpr int kTaggedLoadInstructions = 1;
613 #else
614 static constexpr int kTaggedLoadInstructions = 2;
615 #endif
616 #else
617 static constexpr int kMovInstructionsConstantPool = 1;
618 static constexpr int kMovInstructionsNoConstantPool = 2;
619 static constexpr int kTaggedLoadInstructions = 1;
620 #endif
621 static constexpr int kMovInstructions = FLAG_enable_embedded_constant_pool
622 ? kMovInstructionsConstantPool
623 : kMovInstructionsNoConstantPool;
624
625 // Distance between the instruction referring to the address of the call
626 // target and the return address.
627
628 // Call sequence is a FIXED_SEQUENCE:
629 // mov r8, @ call address
630 // mtlr r8
631 // blrl
632 // @ return address
633 static constexpr int kCallTargetAddressOffset =
634 (kMovInstructions + 2) * kInstrSize;
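// For example (illustrative arithmetic only): with the five-instruction mov
// sequence used when the constant pool is disabled on 64-bit targets, this is
// (5 + 2) * kInstrSize == 28 bytes between the start of the mov and the
// return address.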
635
636 static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
637 return ((cr.code() * CRWIDTH) + crbit);
638 }
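// Worked example (assuming CRWIDTH == 4 and CR_EQ == 2 as defined in
// constants-ppc.h): encode_crbit(cr7, CR_EQ) == 7 * 4 + 2 == 30, i.e. the EQ
// bit of CR field 7 as used in the BI field of conditional branches.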
639
640 #define DECLARE_PPC_X_INSTRUCTIONS_A_FORM(name, instr_name, instr_value) \
641 inline void name(const Register rt, const Register ra, \
642 const Register rb, const RCBit rc = LeaveRC) { \
643 x_form(instr_name, rt, ra, rb, rc); \
644 }
645
646 #define DECLARE_PPC_X_INSTRUCTIONS_B_FORM(name, instr_name, instr_value) \
647 inline void name(const Register ra, const Register rs, \
648 const Register rb, const RCBit rc = LeaveRC) { \
649 x_form(instr_name, rs, ra, rb, rc); \
650 }
651
652 #define DECLARE_PPC_X_INSTRUCTIONS_C_FORM(name, instr_name, instr_value) \
653 inline void name(const Register dst, const Register src, \
654 const RCBit rc = LeaveRC) { \
655 x_form(instr_name, src, dst, r0, rc); \
656 }
657
658 #define DECLARE_PPC_X_INSTRUCTIONS_D_FORM(name, instr_name, instr_value) \
659 template <class R> \
660 inline void name(const R rt, const Register ra, const Register rb, \
661 const RCBit rc = LeaveRC) { \
662 x_form(instr_name, rt.code(), ra.code(), rb.code(), rc); \
663 } \
664 template <class R> \
665 inline void name(const R dst, const MemOperand& src) { \
666 name(dst, src.ra(), src.rb()); \
667 }
668
669 #define DECLARE_PPC_X_INSTRUCTIONS_E_FORM(name, instr_name, instr_value) \
670 inline void name(const Register dst, const Register src, \
671 const int sh, const RCBit rc = LeaveRC) { \
672 x_form(instr_name, src.code(), dst.code(), sh, rc); \
673 }
674
675 #define DECLARE_PPC_X_INSTRUCTIONS_F_FORM(name, instr_name, instr_value) \
676 inline void name(const Register src1, const Register src2, \
677 const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
678 x_form(instr_name, cr, src1, src2, rc); \
679 } \
680 inline void name##w(const Register src1, const Register src2, \
681 const CRegister cr = cr7, const RCBit rc = LeaveRC) { \
682 x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
683 }
684
685 #define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
686 inline void name(const Register dst, const MemOperand& src) { \
687 x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
688 }
689 #define DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM(name, instr_name, instr_value) \
690 inline void name(const Register dst, const MemOperand& src) { \
691 DCHECK(src.ra_ != r0); \
692 x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
693 }
694
695 inline void x_form(Instr instr, int f1, int f2, int f3, int rc) {
696 emit(instr | f1 * B21 | f2 * B16 | f3 * B11 | rc);
697 }
698 inline void x_form(Instr instr, Register rs, Register ra, Register rb,
699 RCBit rc) {
700 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | rc);
701 }
702 inline void x_form(Instr instr, Register ra, Register rs, Register rb,
703 EHBit eh = SetEH) {
704 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | eh);
705 }
706 inline void x_form(Instr instr, CRegister cr, Register s1, Register s2,
707 RCBit rc) {
708 #if V8_TARGET_ARCH_PPC64
709 int L = 1;
710 #else
711 int L = 0;
712 #endif
713 emit(instr | cr.code() * B23 | L * B21 | s1.code() * B16 |
714 s2.code() * B11 | rc);
715 }
716
717 PPC_X_OPCODE_A_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_A_FORM)
718 PPC_X_OPCODE_B_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_B_FORM)
719 PPC_X_OPCODE_C_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_C_FORM)
720 PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
721 PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
722 PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
723 PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
724 PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
725
726 inline void notx(Register dst, Register src, RCBit rc = LeaveRC) {
727 nor(dst, src, src, rc);
728 }
729 inline void lwax(Register rt, const MemOperand& src) {
730 #if V8_TARGET_ARCH_PPC64
731 Register ra = src.ra();
732 Register rb = src.rb();
733 DCHECK(ra != r0);
734 x_form(LWAX, rt, ra, rb, LeaveRC);
735 #else
736 lwzx(rt, src);
737 #endif
738 }
739 inline void extsw(Register rs, Register ra, RCBit rc = LeaveRC) {
740 #if V8_TARGET_ARCH_PPC64
741 emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
742 #else
743 // nop on 32-bit
744 DCHECK(rs == ra && rc == LeaveRC);
745 #endif
746 }
747
748 #undef DECLARE_PPC_X_INSTRUCTIONS_A_FORM
749 #undef DECLARE_PPC_X_INSTRUCTIONS_B_FORM
750 #undef DECLARE_PPC_X_INSTRUCTIONS_C_FORM
751 #undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
752 #undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
753 #undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
754 #undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
755 #undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
756
757 #define DECLARE_PPC_XX3_INSTRUCTIONS(name, instr_name, instr_value) \
758 inline void name(const DoubleRegister rt, const DoubleRegister ra, \
759 const DoubleRegister rb) { \
760 xx3_form(instr_name, rt, ra, rb); \
761 }
762
763 inline void xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
764 DoubleRegister b) {
765 int AX = ((a.code() & 0x20) >> 5) & 0x1;
766 int BX = ((b.code() & 0x20) >> 5) & 0x1;
767 int TX = ((t.code() & 0x20) >> 5) & 0x1;
768
769 emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 |
770 (b.code() & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
771 }
772
773 PPC_XX3_OPCODE_LIST(DECLARE_PPC_XX3_INSTRUCTIONS)
774 #undef DECLARE_PPC_XX3_INSTRUCTIONS
775
776 // ---------------------------------------------------------------------------
777 // Code generation
778
779 // Insert the smallest number of nop instructions
780 // possible to align the pc offset to a multiple
781 // of m. m must be a power of 2 (>= 4).
782 void Align(int m);
783 // Insert the smallest number of zero bytes possible to align the pc offset
784 // to a multiple of m. m must be a power of 2 (>= 2).
785 void DataAlign(int m);
786 // Aligns code to something that's optimal for a jump target for the platform.
787 void CodeTargetAlign();
788
789 // Branch instructions
790 void bclr(BOfield bo, int condition_bit, LKBit lk);
791 void blr();
792 void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
793 void b(int branch_offset, LKBit lk);
794
795 void bcctr(BOfield bo, int condition_bit, LKBit lk);
796 void bctr();
797 void bctrl();
798
799 // Convenience branch instructions using labels
800 void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L), lk); }
801
802 inline CRegister cmpi_optimization(CRegister cr) {
803 // Check whether the branch is preceded by an optimizable cmpi against 0.
804 // The cmpi can be deleted if it is also preceded by an instruction that
805 // sets the register used by the compare and supports a dot form.
806 unsigned int sradi_mask = kOpcodeMask | kExt2OpcodeVariant2Mask;
807 unsigned int srawi_mask = kOpcodeMask | kExt2OpcodeMask;
808 int pos = pc_offset();
809 int cmpi_pos = pc_offset() - kInstrSize;
810
811 if (cmpi_pos > 0 && optimizable_cmpi_pos_ == cmpi_pos &&
812 cmpi_cr_.code() == cr.code() && last_bound_pos_ != pos) {
813 int xpos = cmpi_pos - kInstrSize;
814 int xinstr = instr_at(xpos);
815 int cmpi_ra = (instr_at(cmpi_pos) & 0x1f0000) >> 16;
816 // ra is at the same bit position for the three cases below.
817 int ra = (xinstr & 0x1f0000) >> 16;
818 if (cmpi_ra == ra) {
819 if ((xinstr & sradi_mask) == (EXT2 | SRADIX)) {
820 cr = cr0;
821 instr_at_put(xpos, xinstr | SetRC);
822 pc_ -= kInstrSize;
823 } else if ((xinstr & srawi_mask) == (EXT2 | SRAWIX)) {
824 cr = cr0;
825 instr_at_put(xpos, xinstr | SetRC);
826 pc_ -= kInstrSize;
827 } else if ((xinstr & kOpcodeMask) == ANDIx) {
828 cr = cr0;
829 pc_ -= kInstrSize;
830 // nothing to do here since andi. records.
831 }
832 // didn't match one of the above, must keep cmpwi.
833 }
834 }
835 return cr;
836 }
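// Illustrative sketch of the peephole above (register choices are arbitrary):
//   srawi  r3, r4, 2      // shift whose dot form sets cr0
//   cmpwi  cr7, r3, 0
//   beq    cr7, <target>
// becomes
//   srawi. r3, r4, 2      // rewritten in place with SetRC
//   beq    cr0, <target>  // cmpwi deleted, branch retargeted to cr0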
837
838 void bc_short(Condition cond, Label* L, CRegister cr = cr7,
839 LKBit lk = LeaveLK) {
840 DCHECK(cond != al);
841 DCHECK(cr.code() >= 0 && cr.code() <= 7);
842
843 cr = cmpi_optimization(cr);
844
845 int b_offset = branch_offset(L);
846
847 switch (cond) {
848 case eq:
849 bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk);
850 break;
851 case ne:
852 bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk);
853 break;
854 case gt:
855 bc(b_offset, BT, encode_crbit(cr, CR_GT), lk);
856 break;
857 case le:
858 bc(b_offset, BF, encode_crbit(cr, CR_GT), lk);
859 break;
860 case lt:
861 bc(b_offset, BT, encode_crbit(cr, CR_LT), lk);
862 break;
863 case ge:
864 bc(b_offset, BF, encode_crbit(cr, CR_LT), lk);
865 break;
866 case unordered:
867 bc(b_offset, BT, encode_crbit(cr, CR_FU), lk);
868 break;
869 case ordered:
870 bc(b_offset, BF, encode_crbit(cr, CR_FU), lk);
871 break;
872 case overflow:
873 bc(b_offset, BT, encode_crbit(cr, CR_SO), lk);
874 break;
875 case nooverflow:
876 bc(b_offset, BF, encode_crbit(cr, CR_SO), lk);
877 break;
878 default:
879 UNIMPLEMENTED();
880 }
881 }
882
883 void bclr(Condition cond, CRegister cr = cr7, LKBit lk = LeaveLK) {
884 DCHECK(cond != al);
885 DCHECK(cr.code() >= 0 && cr.code() <= 7);
886
887 cr = cmpi_optimization(cr);
888
889 switch (cond) {
890 case eq:
891 bclr(BT, encode_crbit(cr, CR_EQ), lk);
892 break;
893 case ne:
894 bclr(BF, encode_crbit(cr, CR_EQ), lk);
895 break;
896 case gt:
897 bclr(BT, encode_crbit(cr, CR_GT), lk);
898 break;
899 case le:
900 bclr(BF, encode_crbit(cr, CR_GT), lk);
901 break;
902 case lt:
903 bclr(BT, encode_crbit(cr, CR_LT), lk);
904 break;
905 case ge:
906 bclr(BF, encode_crbit(cr, CR_LT), lk);
907 break;
908 case unordered:
909 bclr(BT, encode_crbit(cr, CR_FU), lk);
910 break;
911 case ordered:
912 bclr(BF, encode_crbit(cr, CR_FU), lk);
913 break;
914 case overflow:
915 bclr(BT, encode_crbit(cr, CR_SO), lk);
916 break;
917 case nooverflow:
918 bclr(BF, encode_crbit(cr, CR_SO), lk);
919 break;
920 default:
921 UNIMPLEMENTED();
922 }
923 }
924
925 void isel(Register rt, Register ra, Register rb, int cb);
926 void isel(Condition cond, Register rt, Register ra, Register rb,
927 CRegister cr = cr7) {
928 DCHECK(cond != al);
929 DCHECK(cr.code() >= 0 && cr.code() <= 7);
930
931 cr = cmpi_optimization(cr);
932
933 switch (cond) {
934 case eq:
935 isel(rt, ra, rb, encode_crbit(cr, CR_EQ));
936 break;
937 case ne:
938 isel(rt, rb, ra, encode_crbit(cr, CR_EQ));
939 break;
940 case gt:
941 isel(rt, ra, rb, encode_crbit(cr, CR_GT));
942 break;
943 case le:
944 isel(rt, rb, ra, encode_crbit(cr, CR_GT));
945 break;
946 case lt:
947 isel(rt, ra, rb, encode_crbit(cr, CR_LT));
948 break;
949 case ge:
950 isel(rt, rb, ra, encode_crbit(cr, CR_LT));
951 break;
952 case unordered:
953 isel(rt, ra, rb, encode_crbit(cr, CR_FU));
954 break;
955 case ordered:
956 isel(rt, rb, ra, encode_crbit(cr, CR_FU));
957 break;
958 case overflow:
959 isel(rt, ra, rb, encode_crbit(cr, CR_SO));
960 break;
961 case nooverflow:
962 isel(rt, rb, ra, encode_crbit(cr, CR_SO));
963 break;
964 default:
965 UNIMPLEMENTED();
966 }
967 }
968
969 void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
970 if (cond == al) {
971 b(L, lk);
972 return;
973 }
974
975 if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) {
976 bc_short(cond, L, cr, lk);
977 return;
978 }
979
980 Label skip;
981 Condition neg_cond = NegateCondition(cond);
982 bc_short(neg_cond, &skip, cr);
983 b(L, lk);
984 bind(&skip);
985 }
986
987 void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
988 b(ne, L, cr, lk);
989 }
990 void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
991 b(eq, L, cr, lk);
992 }
993 void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
994 b(lt, L, cr, lk);
995 }
996 void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
997 b(ge, L, cr, lk);
998 }
999 void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
1000 b(le, L, cr, lk);
1001 }
1002 void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
1003 b(gt, L, cr, lk);
1004 }
1005 void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
1006 b(unordered, L, cr, lk);
1007 }
1008 void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
1009 b(ordered, L, cr, lk);
1010 }
1011 void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
1012 b(overflow, L, cr, lk);
1013 }
1014 void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
1015 b(nooverflow, L, cr, lk);
1016 }
1017
1018 // Decrement CTR; branch if CTR != 0
1019 void bdnz(Label* L, LKBit lk = LeaveLK) {
1020 bc(branch_offset(L), DCBNZ, 0, lk);
1021 }
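// Typical counted-loop sketch (register choice is arbitrary; the iteration
// count is assumed to already be in r5):
//   Label loop;
//   mtctr(r5);     // load the iteration count into CTR
//   bind(&loop);
//   ...            // loop body
//   bdnz(&loop);   // decrement CTR, branch back while CTR != 0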
1022
1023 // Data-processing instructions
1024
1025 void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1026 RCBit r = LeaveRC);
1027
1028 void subc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1029 RCBit r = LeaveRC);
1030 void sube(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1031 RCBit r = LeaveRC);
1032
1033 void subfic(Register dst, Register src, const Operand& imm);
1034
1035 void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1036 RCBit r = LeaveRC);
1037
1038 void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1039 RCBit r = LeaveRC);
1040 void adde(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1041 RCBit r = LeaveRC);
1042 void addze(Register dst, Register src1, OEBit o = LeaveOE, RCBit r = LeaveRC);
1043
1044 void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1045 RCBit r = LeaveRC);
1046
1047 void mulhw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
1048 void mulhwu(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
1049
1050 void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1051 RCBit r = LeaveRC);
1052 void divwu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1053 RCBit r = LeaveRC);
1054
1055 void addi(Register dst, Register src, const Operand& imm);
1056 void addis(Register dst, Register src, const Operand& imm);
1057 void addic(Register dst, Register src, const Operand& imm);
1058
1059 void andi(Register ra, Register rs, const Operand& imm);
1060 void andis(Register ra, Register rs, const Operand& imm);
1061 void ori(Register dst, Register src, const Operand& imm);
1062 void oris(Register dst, Register src, const Operand& imm);
1063 void xori(Register dst, Register src, const Operand& imm);
1064 void xoris(Register ra, Register rs, const Operand& imm);
1065 void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
1066 void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
1067 void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
1068 void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7);
1069 void li(Register dst, const Operand& src);
1070 void lis(Register dst, const Operand& imm);
1071 void mr(Register dst, Register src);
1072
1073 void lbz(Register dst, const MemOperand& src);
1074 void lhz(Register dst, const MemOperand& src);
1075 void lha(Register dst, const MemOperand& src);
1076 void lwz(Register dst, const MemOperand& src);
1077 void lwzu(Register dst, const MemOperand& src);
1078 void lwa(Register dst, const MemOperand& src);
1079 void stb(Register dst, const MemOperand& src);
1080 void sth(Register dst, const MemOperand& src);
1081 void stw(Register dst, const MemOperand& src);
1082 void stwu(Register dst, const MemOperand& src);
1083 void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
1084
1085 #if V8_TARGET_ARCH_PPC64
1086 void ld(Register rd, const MemOperand& src);
1087 void ldu(Register rd, const MemOperand& src);
1088 void std(Register rs, const MemOperand& src);
1089 void stdu(Register rs, const MemOperand& src);
1090 void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
1091 void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
1092 void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
1093 void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC);
1094 void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
1095 void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1096 void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1097 void clrrdi(Register dst, Register src, const Operand& val,
1098 RCBit rc = LeaveRC);
1099 void clrldi(Register dst, Register src, const Operand& val,
1100 RCBit rc = LeaveRC);
1101 void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1102 void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
1103 void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1104 void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1105 void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1106 RCBit r = LeaveRC);
1107 void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1108 RCBit r = LeaveRC);
1109 void divdu(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
1110 RCBit r = LeaveRC);
1111 #endif
1112
1113 void rlwinm(Register ra, Register rs, int sh, int mb, int me,
1114 RCBit rc = LeaveRC);
1115 void rlwimi(Register ra, Register rs, int sh, int mb, int me,
1116 RCBit rc = LeaveRC);
1117 void rlwnm(Register ra, Register rs, Register rb, int mb, int me,
1118 RCBit rc = LeaveRC);
1119 void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1120 void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
1121 void clrrwi(Register dst, Register src, const Operand& val,
1122 RCBit rc = LeaveRC);
1123 void clrlwi(Register dst, Register src, const Operand& val,
1124 RCBit rc = LeaveRC);
1125 void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
1126 void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1127 void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
1128
1129 void subi(Register dst, Register src1, const Operand& src2);
1130
1131 void mov(Register dst, const Operand& src);
1132 void bitwise_mov(Register dst, intptr_t value);
1133 void bitwise_mov32(Register dst, int32_t value);
1134 void bitwise_add32(Register dst, Register src, int32_t value);
1135
1136 // Load the position of the label relative to the generated code object
1137 // pointer in a register.
1138 void mov_label_offset(Register dst, Label* label);
1139
1140 // dst = base + label position + delta
1141 void add_label_offset(Register dst, Register base, Label* label,
1142 int delta = 0);
1143
1144 // Load the address of the label in a register and associate with an
1145 // internal reference relocation.
1146 void mov_label_addr(Register dst, Label* label);
1147
1148 // Emit the address of the label (i.e. a jump table entry) and associate with
1149 // an internal reference relocation.
1150 void emit_label_addr(Label* label);
1151
1152 // Multiply instructions
1153 void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
1154 RCBit r = LeaveRC);
1155
1156 // Miscellaneous arithmetic instructions
1157
1158 // Special register access
1159 void crxor(int bt, int ba, int bb);
1160 void crclr(int bt) { crxor(bt, bt, bt); }
1161 void creqv(int bt, int ba, int bb);
1162 void crset(int bt) { creqv(bt, bt, bt); }
1163 void mflr(Register dst);
1164 void mtlr(Register src);
1165 void mtctr(Register src);
1166 void mtxer(Register src);
1167 void mcrfs(CRegister cr, FPSCRBit bit);
1168 void mfcr(Register dst);
1169 #if V8_TARGET_ARCH_PPC64
1170 void mffprd(Register dst, DoubleRegister src);
1171 void mffprwz(Register dst, DoubleRegister src);
1172 void mtfprd(DoubleRegister dst, Register src);
1173 void mtfprwz(DoubleRegister dst, Register src);
1174 void mtfprwa(DoubleRegister dst, Register src);
1175 #endif
1176
1177 void function_descriptor();
1178
1179 // Exception-generating instructions and debugging support
1180 void stop(const char* msg, Condition cond = al,
1181 int32_t code = kDefaultStopCode, CRegister cr = cr7);
1182
1183 void bkpt(uint32_t imm16); // v5 and above
1184
1185 void dcbf(Register ra, Register rb);
1186 void sync();
1187 void lwsync();
1188 void icbi(Register ra, Register rb);
1189 void isync();
1190
1191 // Support for floating point
1192 void lfd(const DoubleRegister frt, const MemOperand& src);
1193 void lfdu(const DoubleRegister frt, const MemOperand& src);
1194 void lfs(const DoubleRegister frt, const MemOperand& src);
1195 void lfsu(const DoubleRegister frt, const MemOperand& src);
1196 void stfd(const DoubleRegister frs, const MemOperand& src);
1197 void stfdu(const DoubleRegister frs, const MemOperand& src);
1198 void stfs(const DoubleRegister frs, const MemOperand& src);
1199 void stfsu(const DoubleRegister frs, const MemOperand& src);
1200
1201 void fadd(const DoubleRegister frt, const DoubleRegister fra,
1202 const DoubleRegister frb, RCBit rc = LeaveRC);
1203 void fsub(const DoubleRegister frt, const DoubleRegister fra,
1204 const DoubleRegister frb, RCBit rc = LeaveRC);
1205 void fdiv(const DoubleRegister frt, const DoubleRegister fra,
1206 const DoubleRegister frb, RCBit rc = LeaveRC);
1207 void fmul(const DoubleRegister frt, const DoubleRegister fra,
1208 const DoubleRegister frc, RCBit rc = LeaveRC);
1209 void fcmpu(const DoubleRegister fra, const DoubleRegister frb,
1210 CRegister cr = cr7);
1211 void fmr(const DoubleRegister frt, const DoubleRegister frb,
1212 RCBit rc = LeaveRC);
1213 void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
1214 void fctiw(const DoubleRegister frt, const DoubleRegister frb);
1215 void frin(const DoubleRegister frt, const DoubleRegister frb,
1216 RCBit rc = LeaveRC);
1217 void friz(const DoubleRegister frt, const DoubleRegister frb,
1218 RCBit rc = LeaveRC);
1219 void frip(const DoubleRegister frt, const DoubleRegister frb,
1220 RCBit rc = LeaveRC);
1221 void frim(const DoubleRegister frt, const DoubleRegister frb,
1222 RCBit rc = LeaveRC);
1223 void frsp(const DoubleRegister frt, const DoubleRegister frb,
1224 RCBit rc = LeaveRC);
1225 void fcfid(const DoubleRegister frt, const DoubleRegister frb,
1226 RCBit rc = LeaveRC);
1227 void fcfidu(const DoubleRegister frt, const DoubleRegister frb,
1228 RCBit rc = LeaveRC);
1229 void fcfidus(const DoubleRegister frt, const DoubleRegister frb,
1230 RCBit rc = LeaveRC);
1231 void fcfids(const DoubleRegister frt, const DoubleRegister frb,
1232 RCBit rc = LeaveRC);
1233 void fctid(const DoubleRegister frt, const DoubleRegister frb,
1234 RCBit rc = LeaveRC);
1235 void fctidz(const DoubleRegister frt, const DoubleRegister frb,
1236 RCBit rc = LeaveRC);
1237 void fctidu(const DoubleRegister frt, const DoubleRegister frb,
1238 RCBit rc = LeaveRC);
1239 void fctiduz(const DoubleRegister frt, const DoubleRegister frb,
1240 RCBit rc = LeaveRC);
1241 void fsel(const DoubleRegister frt, const DoubleRegister fra,
1242 const DoubleRegister frc, const DoubleRegister frb,
1243 RCBit rc = LeaveRC);
1244 void fneg(const DoubleRegister frt, const DoubleRegister frb,
1245 RCBit rc = LeaveRC);
1246 void mtfsb0(FPSCRBit bit, RCBit rc = LeaveRC);
1247 void mtfsb1(FPSCRBit bit, RCBit rc = LeaveRC);
1248 void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
1249 void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
1250 void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
1251 RCBit rc = LeaveRC);
1252 void fsqrt(const DoubleRegister frt, const DoubleRegister frb,
1253 RCBit rc = LeaveRC);
1254 void fabs(const DoubleRegister frt, const DoubleRegister frb,
1255 RCBit rc = LeaveRC);
1256 void fmadd(const DoubleRegister frt, const DoubleRegister fra,
1257 const DoubleRegister frc, const DoubleRegister frb,
1258 RCBit rc = LeaveRC);
1259 void fmsub(const DoubleRegister frt, const DoubleRegister fra,
1260 const DoubleRegister frc, const DoubleRegister frb,
1261 RCBit rc = LeaveRC);
1262
1263 // Pseudo instructions
1264
1265 // Different nop operations are used by the code generator to detect certain
1266 // states of the generated code.
1267 enum NopMarkerTypes {
1268 NON_MARKING_NOP = 0,
1269 GROUP_ENDING_NOP,
1270 DEBUG_BREAK_NOP,
1271 // IC markers.
1272 PROPERTY_ACCESS_INLINED,
1273 PROPERTY_ACCESS_INLINED_CONTEXT,
1274 PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
1275 // Helper values.
1276 LAST_CODE_MARKER,
1277 FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
1278 };
1279
1280 void nop(int type = 0); // 0 is the default non-marking type.
1281
1282 void push(Register src) {
1283 #if V8_TARGET_ARCH_PPC64
1284 stdu(src, MemOperand(sp, -kPointerSize));
1285 #else
1286 stwu(src, MemOperand(sp, -kPointerSize));
1287 #endif
1288 }
1289
1290 void pop(Register dst) {
1291 #if V8_TARGET_ARCH_PPC64
1292 ld(dst, MemOperand(sp));
1293 #else
1294 lwz(dst, MemOperand(sp));
1295 #endif
1296 addi(sp, sp, Operand(kPointerSize));
1297 }
1298
1299 void pop() { addi(sp, sp, Operand(kPointerSize)); }
1300
1301 // Jump unconditionally to given label.
1302 void jmp(Label* L) { b(L); }
1303
1304 // Check the code size generated from label to here.
1305 int SizeOfCodeGeneratedSince(Label* label) {
1306 return pc_offset() - label->pos();
1307 }
1308
1309 // Check the number of instructions generated from label to here.
1310 int InstructionsGeneratedSince(Label* label) {
1311 return SizeOfCodeGeneratedSince(label) / kInstrSize;
1312 }
1313
1314 // Class for scoping the postponement of trampoline pool generation.
1315 class BlockTrampolinePoolScope {
1316 public:
1317 explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
1318 assem_->StartBlockTrampolinePool();
1319 }
1320 ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
1321
1322 private:
1323 Assembler* assem_;
1324
1325 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
1326 };
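// Usage sketch (editorial addition; 'target' is a placeholder): keep a
// fixed-length sequence contiguous by blocking the trampoline pool around it.
//
//   {
//     BlockTrampolinePoolScope block_trampoline_pool(this);
//     mov(ip, Operand(target, RelocInfo::CODE_TARGET));
//     mtctr(ip);
//     bctrl();
//   }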
1327
1328 // Class for scoping disabling constant pool entry merging
1329 class BlockConstantPoolEntrySharingScope {
1330 public:
1331 explicit BlockConstantPoolEntrySharingScope(Assembler* assem)
1332 : assem_(assem) {
1333 assem_->StartBlockConstantPoolEntrySharing();
1334 }
1335 ~BlockConstantPoolEntrySharingScope() {
1336 assem_->EndBlockConstantPoolEntrySharing();
1337 }
1338
1339 private:
1340 Assembler* assem_;
1341
1342 DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstantPoolEntrySharingScope);
1343 };
1344
1345 // Record a comment relocation entry that can be used by a disassembler.
1346 // Use --code-comments to enable.
1347 void RecordComment(const char* msg);
1348
1349 // Record a deoptimization reason that can be used by a log or cpu profiler.
1350 // Use --trace-deopt to enable.
1351 void RecordDeoptReason(DeoptimizeReason reason, SourcePosition position,
1352 int id);
1353
1354 // Writes a byte, word, double word, or pointer-sized piece of data in the
1355 // code stream. Used for inline tables, e.g., jump-tables.
1356 void db(uint8_t data);
1357 void dd(uint32_t data);
1358 void dq(uint64_t data);
1359 void dp(uintptr_t data);
1360
1361 // Read/patch instructions
1362 Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
1363 void instr_at_put(int pos, Instr instr) {
1364 *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
1365 }
1366 static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
1367 static void instr_at_put(Address pc, Instr instr) {
1368 *reinterpret_cast<Instr*>(pc) = instr;
1369 }
1370 static Condition GetCondition(Instr instr);
1371
1372 static bool IsLis(Instr instr);
1373 static bool IsLi(Instr instr);
1374 static bool IsAddic(Instr instr);
1375 static bool IsOri(Instr instr);
1376
1377 static bool IsBranch(Instr instr);
1378 static Register GetRA(Instr instr);
1379 static Register GetRB(Instr instr);
1380 #if V8_TARGET_ARCH_PPC64
1381 static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
1382 Instr instr4, Instr instr5);
1383 #else
1384 static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2);
1385 #endif
1386
1387 static bool IsCmpRegister(Instr instr);
1388 static bool IsCmpImmediate(Instr instr);
1389 static bool IsRlwinm(Instr instr);
1390 static bool IsAndi(Instr instr);
1391 #if V8_TARGET_ARCH_PPC64
1392 static bool IsRldicl(Instr instr);
1393 #endif
1394 static bool IsCrSet(Instr instr);
1395 static Register GetCmpImmediateRegister(Instr instr);
1396 static int GetCmpImmediateRawImmediate(Instr instr);
1397 static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
1398
1399 // Postpone the generation of the trampoline pool for the specified number of
1400 // instructions.
1401 void BlockTrampolinePoolFor(int instructions);
1402 void CheckTrampolinePool();
1403
1404 // For mov. Return the number of actual instructions required to
1405 // load the operand into a register. This can be anywhere from
1406 // one (constant pool small section) to five instructions (full
1407 // 64-bit sequence).
1408 //
1409 // The value returned is only valid as long as no entries are added to the
1410 // constant pool between this call and the actual instruction being emitted.
1411 int instructions_required_for_mov(Register dst, const Operand& src) const;
1412
1413 // Decide between using the constant pool vs. a mov immediate sequence.
1414 bool use_constant_pool_for_mov(Register dst, const Operand& src,
1415 bool canOptimize) const;
1416
1417 // The code currently calls CheckBuffer() too often. This has the side
1418 // effect of randomly growing the buffer in the middle of multi-instruction
1419 // sequences.
1420 //
1421 // This function allows outside callers to check and grow the buffer
1422 void EnsureSpaceFor(int space_needed);
1423
1424 int EmitConstantPool() { return constant_pool_builder_.Emit(this); }
1425
1426 bool ConstantPoolAccessIsInOverflow() const {
1427 return constant_pool_builder_.NextAccess(ConstantPoolEntry::INTPTR) ==
1428 ConstantPoolEntry::OVERFLOWED;
1429 }
1430
1431 Label* ConstantPoolPosition() {
1432 return constant_pool_builder_.EmittedPosition();
1433 }
1434
1435 void EmitRelocations();
1436
1437 protected:
1438 int buffer_space() const { return reloc_info_writer.pos() - pc_; }
1439
1440 // Decode instruction(s) at pos and return backchain to previous
1441 // label reference or kEndOfChain.
1442 int target_at(int pos);
1443
1444 // Patch instruction(s) at pos to target target_pos (e.g. branch)
1445 void target_at_put(int pos, int target_pos, bool* is_branch = nullptr);
1446
1447 // Record reloc info for current pc_
1448 void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
1449 ConstantPoolEntry::Access ConstantPoolAddEntry(RelocInfo::Mode rmode,
1450 intptr_t value) {
1451 bool sharing_ok = RelocInfo::IsNone(rmode) ||
1452 (!options().record_reloc_info_for_serialization &&
1453 RelocInfo::IsShareableRelocMode(rmode) &&
1454 !is_constant_pool_entry_sharing_blocked());
1455 return constant_pool_builder_.AddEntry(pc_offset(), value, sharing_ok);
1456 }
1457 ConstantPoolEntry::Access ConstantPoolAddEntry(Double value) {
1458 return constant_pool_builder_.AddEntry(pc_offset(), value);
1459 }
1460
1461 // Block the emission of the trampoline pool before pc_offset.
1462 void BlockTrampolinePoolBefore(int pc_offset) {
1463 if (no_trampoline_pool_before_ < pc_offset)
1464 no_trampoline_pool_before_ = pc_offset;
1465 }
1466
1467 void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
1468 void EndBlockTrampolinePool() {
1469 int count = --trampoline_pool_blocked_nesting_;
1470 if (count == 0) CheckTrampolinePoolQuick();
1471 }
1472 bool is_trampoline_pool_blocked() const {
1473 return trampoline_pool_blocked_nesting_ > 0;
1474 }
1475
1476 void StartBlockConstantPoolEntrySharing() {
1477 constant_pool_entry_sharing_blocked_nesting_++;
1478 }
1479 void EndBlockConstantPoolEntrySharing() {
1480 constant_pool_entry_sharing_blocked_nesting_--;
1481 }
1482 bool is_constant_pool_entry_sharing_blocked() const {
1483 return constant_pool_entry_sharing_blocked_nesting_ > 0;
1484 }
1485
1486 bool has_exception() const { return internal_trampoline_exception_; }
1487
1488 bool is_trampoline_emitted() const { return trampoline_emitted_; }
1489
1490 // Code generation
1491 // The relocation writer's position is at least kGap bytes below the end of
1492 // the generated instructions. This is so that multi-instruction sequences do
1493 // not have to check for overflow. The same is true for writes of large
1494 // relocation info entries.
1495 static constexpr int kGap = 32;
1496
1497 RelocInfoWriter reloc_info_writer;
1498
1499 private:
1500 // Avoid overflows for displacements etc.
1501 static const int kMaximalBufferSize = 512 * MB;
1502
1503 // Repeated checking whether the trampoline pool should be emitted is rather
1504 // expensive. By default we only check again once a number of instructions
1505 // has been generated.
1506 int next_trampoline_check_; // pc offset of next buffer check.
1507
1508 // Emission of the trampoline pool may be blocked in some code sequences.
1509 int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
1510 int no_trampoline_pool_before_; // Block emission before this pc offset.
1511
1512 // Do not share constant pool entries.
1513 int constant_pool_entry_sharing_blocked_nesting_;
1514
1515 // Relocation info generation
1516 // Each relocation is encoded as a variable size value
1517 static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
1518 std::vector<DeferredRelocInfo> relocations_;
1519
1520 // The bound position, before this we cannot do instruction elimination.
1521 int last_bound_pos_;
1522 // Optimizable cmpi information.
1523 int optimizable_cmpi_pos_;
1524 CRegister cmpi_cr_ = CRegister::no_reg();
1525
1526 ConstantPoolBuilder constant_pool_builder_;
1527
1528 void CheckBuffer() {
1529 if (buffer_space() <= kGap) {
1530 GrowBuffer();
1531 }
1532 }
1533
1534 void GrowBuffer(int needed = 0);
1535 // Code emission
1536 void emit(Instr x) {
1537 CheckBuffer();
1538 *reinterpret_cast<Instr*>(pc_) = x;
1539 pc_ += kInstrSize;
1540 CheckTrampolinePoolQuick();
1541 }
1542 void TrackBranch() {
1543 DCHECK(!trampoline_emitted_);
1544 int count = tracked_branch_count_++;
1545 if (count == 0) {
1546 // We leave space (kMaxBlockTrampolineSectionSize)
1547 // for BlockTrampolinePoolScope buffer.
1548 next_trampoline_check_ =
1549 pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
1550 } else {
1551 next_trampoline_check_ -= kTrampolineSlotsSize;
1552 }
1553 }
1554
1555 inline void UntrackBranch();
1556 void CheckTrampolinePoolQuick() {
1557 if (pc_offset() >= next_trampoline_check_) {
1558 CheckTrampolinePool();
1559 }
1560 }
1561
1562 // Instruction generation
1563 void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
1564 DoubleRegister frb, RCBit r);
1565 void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
1566 bool signed_disp);
1567 void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
1568 RCBit r);
1569 void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
1570 RCBit r);
1571 void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
1572 RCBit r);
1573
1574 // Labels
1575 void print(Label* L);
1576 int max_reach_from(int pos);
1577 void bind_to(Label* L, int pos);
1578 void next(Label* L);
1579
1580 class Trampoline {
1581 public:
1582 Trampoline() {
1583 next_slot_ = 0;
1584 free_slot_count_ = 0;
1585 }
1586 Trampoline(int start, int slot_count) {
1587 next_slot_ = start;
1588 free_slot_count_ = slot_count;
1589 }
1590 int take_slot() {
1591 int trampoline_slot = kInvalidSlotPos;
1592 if (free_slot_count_ <= 0) {
1593 // We have run out of space on trampolines.
1594 // Make sure we fail in debug mode, so we become aware of each case
1595 // when this happens.
1596 DCHECK(0);
1597 // Internal exception will be caught.
1598 } else {
1599 trampoline_slot = next_slot_;
1600 free_slot_count_--;
1601 next_slot_ += kTrampolineSlotsSize;
1602 }
1603 return trampoline_slot;
1604 }
1605
1606 private:
1607 int next_slot_;
1608 int free_slot_count_;
1609 };
1610
1611 int32_t get_trampoline_entry();
1612 int tracked_branch_count_;
1613 // If a trampoline has been emitted, the generated code is becoming large.
1614 // As this is already a slow case which can possibly break our code
1615 // generation for the extreme case, we use this information to
1616 // trigger a different mode of branch instruction generation, where we
1617 // no longer use a single branch instruction.
1618 bool trampoline_emitted_;
1619 static constexpr int kTrampolineSlotsSize = kInstrSize;
1620 static constexpr int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
1621 static constexpr int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
1622 static constexpr int kInvalidSlotPos = -1;
1623
1624 Trampoline trampoline_;
1625 bool internal_trampoline_exception_;
1626
1627 void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
1628
1629 friend class RegExpMacroAssemblerPPC;
1630 friend class RelocInfo;
1631 friend class BlockTrampolinePoolScope;
1632 friend class EnsureSpace;
1633 };
1634
1635
1636 class EnsureSpace BASE_EMBEDDED {
1637 public:
1638 explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
1639 };
1640
1641 class PatchingAssembler : public Assembler {
1642 public:
1643 PatchingAssembler(const AssemblerOptions& options, byte* address,
1644 int instructions);
1645 ~PatchingAssembler();
1646 };
1647
1648 } // namespace internal
1649 } // namespace v8
1650
1651 #endif // V8_PPC_ASSEMBLER_PPC_H_
1652