// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions
// are met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the
// distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
// OF THE POSSIBILITY OF SUCH DAMAGE.

// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2014 the V8 project authors. All rights reserved.

#include "src/ppc/assembler-ppc.h"

#if V8_TARGET_ARCH_PPC

#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/code-stubs.h"
#include "src/deoptimizer.h"
#include "src/macro-assembler.h"
#include "src/ppc/assembler-ppc-inl.h"

namespace v8 {
namespace internal {

// Get the CPU features enabled by the build.
static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
  return answer;
}


void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // Detect whether the frim instruction is supported (POWER5+).
  // For now we just check for processors we know do not support it.
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.part() == base::CPU::PPC_POWER9) {
    supported_ |= (1u << MODULO);
  }
#if V8_TARGET_ARCH_PPC64
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
    supported_ |= (1u << VSX);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume FP support and the default cache line size.
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
  supported_ |= (1u << VSX);
  supported_ |= (1u << MODULO);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
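
// A sketch of how the probed bits are typically consumed elsewhere
// (assuming the usual CpuFeatures::IsSupported() query):
//
//   if (CpuFeatures::IsSupported(ISELECT)) {
//     // emit an isel-based select sequence
//   } else {
//     // emit a branch-based fallback
//   }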


void CpuFeatures::PrintTarget() {
  const char* ppc_arch = nullptr;

#if V8_TARGET_ARCH_PPC64
  ppc_arch = "ppc64";
#else
  ppc_arch = "ppc";
#endif

  printf("target %s\n", ppc_arch);
}


void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}


Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
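  // Note: this table relies on V8's PPC register aliases -- sp is r1,
  // ip is r12 and fp is r31.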
  const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
                                 r8, r9, r10, r11, ip, r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);

bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded. Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
    return Assembler::IsConstantPoolLoadStart(pc_);
  }
  return false;
}

int RelocInfo::GetDeoptimizationId(Isolate* isolate, DeoptimizeKind kind) {
  DCHECK(IsRuntimeEntry(rmode_));
  return Deoptimizer::GetDeoptimizationId(isolate, target_address(), kind);
}

void RelocInfo::set_js_to_wasm_address(Address address,
                                       ICacheFlushMode icache_flush_mode) {
  DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
  Assembler::set_target_address_at(pc_, constant_pool_, address,
                                   icache_flush_mode);
}

Address RelocInfo::js_to_wasm_address() const {
  DCHECK_EQ(rmode_, JS_TO_WASM_CALL);
  return Assembler::target_address_at(pc_, constant_pool_);
}

uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}

// -----------------------------------------------------------------------------
// Implementation of Operand and MemOperand
// See assembler-ppc-inl.h for inlined constructors

Operand::Operand(Handle<HeapObject> handle) {
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::EMBEDDED_OBJECT;
}

Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}

Operand Operand::EmbeddedCode(CodeStub* stub) {
  Operand result(0, RelocInfo::CODE_TARGET);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(stub);
  return result;
}

MemOperand::MemOperand(Register rn, int32_t offset)
    : ra_(rn), offset_(offset), rb_(no_reg) {}

MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}

void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber:
        object =
            isolate->factory()->NewHeapNumber(request.heap_number(), TENURED);
        break;
      case HeapObjectRequest::kCodeStub:
        request.code_stub()->set_isolate(isolate);
        object = request.code_stub()->GetCode();
        break;
    }
    Address pc = reinterpret_cast<Address>(buffer_) + request.offset();
    Address constant_pool = kNullAddress;
    set_target_address_at(pc, constant_pool,
                          reinterpret_cast<Address>(object.location()),
                          SKIP_ICACHE_FLUSH);
  }
}

// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.

Assembler::Assembler(const AssemblerOptions& options, void* buffer,
                     int buffer_size)
    : AssemblerBase(options, buffer, buffer_size),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}

void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();
  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
  DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CodeTargetAlign() { Align(8); }


Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;
}


bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}


bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }


Register Assembler::GetRA(Instr instr) {
  return Register::from_code(Instruction::RAValue(instr));
}


Register Assembler::GetRB(Instr instr) {
  return Register::from_code(Instruction::RBValue(instr));
}


#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000 lis r12, 0
  // 618c0000 ori r12, r12, 0
  // 798c07c6 rldicr r12, r12, 32, 31
  // 658c00c3 oris r12, r12, 195
  // 618ccd40 ori r12, r12, 52544
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
          (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
          ((instr5 >> 16) == 0x618C));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553 lis r12, 9555
  // 618c5000 ori r12, r12, 20480
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
#endif


bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}


Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}


// Labels refer to positions in the (to be) generated code.
// There are bound, linked, and unused labels.
//
// Bound labels refer to known positions in the already
// generated code. pos() is the position the label refers to.
//
// Linked labels refer to unknown positions in the code
// to be generated; pos() is the position of the last
// instruction using the label.


// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;
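
// A sketch of the chain this produces (positions are illustrative): three
// forward branches at pc 0, 12 and 40 that all target the same unbound
// Label L carry these immediates:
//   pc 0:  offset 0    -- first use links to itself; target_at() reports
//                         this as kEndOfChain
//   pc 12: offset -12  -- links back to pc 0
//   pc 40: offset -28  -- links back to pc 12 (L->pos() == 40)
// bind_to() walks the chain via next() and patches each entry with
// target_at_put().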


// Dummy opcodes for unbound label mov instructions or jump table entries.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
};

int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // Check which type of branch this is: 16 or 26 bit offset.
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}


void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori r0, r0, 0
      } else {
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori r0, r0, 0
      } else {
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_ + pos), 2);
      patcher.bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 27) & 0x1F);
      Register base = Register::from_code((operands >> 22) & 0x1F);
      int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
                          ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
                          : (SIGN_EXT_IMM22(operands & kImm22Mask));
      int32_t offset = target_pos + delta;
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_ + pos),
          2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
      patcher.bitwise_add32(dst, base, offset);
      if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_ + pos),
                                kMovInstructionsNoConstantPool);
      // Keep internal references relative until EmitRelocations.
      patcher.bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_ + pos),
                                kPointerSize / kInstrSize);
      // Keep internal references relative until EmitRelocations.
      patcher.dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}


int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  // Check which type of branch this is: 16 or 26 bit offset.
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}


void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && !is_intn(offset, maxReach)) {
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}


void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}


bool Assembler::is_near(Label* L, Condition cond) {
  DCHECK(L->is_bound());
  if (!L->is_bound()) return false;

  int maxReach = ((cond == al) ? 26 : 16);
  int offset = L->pos() - pc_offset();

  return is_intn(offset, maxReach);
}


void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}

void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}

void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1F;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1F;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}


// Returns the next free trampoline entry.
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}


int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow. See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}


// Branch instructions.


void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}


void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }


void Assembler::bctrl() { bcctr(BA, 0, SetLK); }


void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}


void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}


void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}


void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}


void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}


void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}
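
// Worked example of the rlwinm mappings above (a sketch): with a shift of 5,
//   slwi rd, rs, 5  ==  rlwinm rd, rs, 5, 0, 26   (rotate left 5, keep bits 0..26)
//   srwi rd, rs, 5  ==  rlwinm rd, rs, 27, 5, 31  (rotate left 27, keep bits 5..31)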


void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}


void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);
}


void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}


void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}

void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}

void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}

void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}


void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}

void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}

void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}

void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}


void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}


// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}


// Multiply hi word
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}


// Multiply hi word unsigned
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}


// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}


// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}


void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use li instead to show intent
  d_form(ADDI, dst, src, imm.immediate(), true);
}


void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.immediate(), true);
}


void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}


void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}


void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}


void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}


void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}


void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save the position and cr for later examination
  // of a potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}


// Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.immediate(), true);
}


void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}
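
// li and lis work because addi/addis treat RA == r0 as the literal value 0
// rather than the contents of r0, so the result is simply the immediate
// (shifted left 16 bits for lis).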


// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}


void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}


void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}


void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHA, dst, src.ra(), src.offset(), true);
}


void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}

void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STB, dst, src.ra(), src.offset(), true);
}


void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STH, dst, src.ra(), src.offset(), true);
}


void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STW, dst, src.ra(), src.offset(), true);
}


void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STWU, dst, src.ra(), src.offset(), true);
}


void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}


#if V8_TARGET_ARCH_PPC64
// 64bit specific instructions
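// ld/ldu/lwa share the DS instruction form: the displacement must be a
// multiple of 4, and its low two bits select the variant (ld = 0, ldu = 1,
// lwa = 2); std/stdu use the same scheme (std = 0, stdu = 1). That is what
// the "| 1" / "| 2" on the emitted offsets below (and in lwa above) encode.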
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}


void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}


void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}


void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}


void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);
}


void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);
}


void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}


void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}


void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}


void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
#endif


// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
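// Descriptor layout emitted below (three pointer-size slots):
//   [0] entry point -> address of the first real instruction
//   [1] TOC pointer -> 0
//   [2] environment (static chain) -> 0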
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK_EQ(pc_offset(), 0);
    emit_label_addr(&instructions);
    dp(0);
    dp(0);
    bind(&instructions);
  }
}


int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}


bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }
  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
#else
  bool allowOverflow = !(canOptimize || dst == r0);
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}


void Assembler::EnsureSpaceFor(int space_needed) {
  if (buffer_space() <= (kGap + space_needed)) {
    GrowBuffer(space_needed);
  }
}


bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}


// Primarily used for loading constants.
// This should really move to macro-assembler, as it is really a pseudo
// instruction.
// Some usages of this intend for a FIXED_SEQUENCE to be used.
// TODO: break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence.
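// For example (a sketch): when the constant pool is not used, a relocatable
// value has canOptimize == false and falls through to bitwise_mov(), which
// always emits the same five-instruction lis/ori/sldi/oris/ori pattern on
// 64-bit targets; that fixed shape is what makes the sequence patchable
// later.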
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value;
  if (src.IsHeapObjectRequest()) {
    RequestHeapObject(src.heap_object_request());
    value = 0;
  } else {
    value = src.immediate();
  }
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (!src.IsHeapObjectRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xFFFF);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}


void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
  int32_t hi_32 = static_cast<int32_t>(value >> 32);
  int32_t lo_32 = static_cast<int32_t>(value);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
  lo_word = static_cast<int>(lo_32 & 0xFFFF);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
#else
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
#endif
}
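
// Worked example of the 64-bit path above: value 0x0000000000C3CD40 emits
//   lis  dst, 0           (hi_32 >> 16)
//   ori  dst, dst, 0      (hi_32 & 0xFFFF)
//   sldi dst, dst, 32
//   oris dst, dst, 0x00C3 (lo_32 >> 16)
//   ori  dst, dst, 0xCD40 (lo_32 & 0xFFFF)
// which mirrors the example constants that Is64BitLoadIntoR12() matches.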


void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}


void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
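    // addic sign-extends its 16-bit immediate, so when lo_word has bit 15
    // set the low add effectively subtracts 0x10000; pre-increment hi_word
    // to compensate.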
1439 if (lo_word & 0x8000) hi_word++;
1440 addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
1441 addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
1442 }
1443 }
1444
1445
mov_label_offset(Register dst,Label * label)1446 void Assembler::mov_label_offset(Register dst, Label* label) {
1447 int position = link(label);
1448 if (label->is_bound()) {
1449 // Load the position of the label relative to the generated code object.
1450 mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
1451 } else {
1452 // Encode internal reference to unbound label. We use a dummy opcode
1453 // such that it won't collide with any opcode that might appear in the
1454 // label's chain. Encode the destination register in the 2nd instruction.
1455 int link = position - pc_offset();
1456 DCHECK_EQ(0, link & 3);
1457 link >>= 2;
1458 DCHECK(is_int26(link));
1459
1460 // When the label is bound, these instructions will be patched
1461 // with a 2 instruction mov sequence that will load the
1462 // destination register with the position of the label from the
1463 // beginning of the code.
1464 //
1465 // target_at extracts the link and target_at_put patches the instructions.
1466 BlockTrampolinePoolScope block_trampoline_pool(this);
1467 emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
1468 emit(dst.code());
1469 }
1470 }
1471
1472
add_label_offset(Register dst,Register base,Label * label,int delta)1473 void Assembler::add_label_offset(Register dst, Register base, Label* label,
1474 int delta) {
1475 int position = link(label);
1476 if (label->is_bound()) {
1477 // dst = base + position + delta
1478 position += delta;
1479 bitwise_add32(dst, base, position);
1480 } else {
1481 // Encode internal reference to unbound label. We use a dummy opcode
1482 // such that it won't collide with any opcode that might appear in the
1483 // label's chain. Encode the operands in the 2nd instruction.
1484 int link = position - pc_offset();
1485 DCHECK_EQ(0, link & 3);
1486 link >>= 2;
1487 DCHECK(is_int26(link));
1488 BlockTrampolinePoolScope block_trampoline_pool(this);
1489
1490 emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
1491 : kUnboundAddLabelLongOffsetOpcode) |
1492 (link & kImm26Mask));
1493 emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));
1494
1495 if (!is_int22(delta)) {
1496 emit(delta);
1497 }
1498 }
1499 }
1500
1501
mov_label_addr(Register dst,Label * label)1502 void Assembler::mov_label_addr(Register dst, Label* label) {
1503 CheckBuffer();
1504 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
1505 int position = link(label);
1506 if (label->is_bound()) {
1507 // Keep internal references relative until EmitRelocations.
1508 bitwise_mov(dst, position);
1509 } else {
1510 // Encode internal reference to unbound label. We use a dummy opcode
1511 // such that it won't collide with any opcode that might appear in the
1512 // label's chain. Encode the destination register in the 2nd instruction.
1513 int link = position - pc_offset();
1514 DCHECK_EQ(0, link & 3);
1515 link >>= 2;
1516 DCHECK(is_int26(link));
1517
1518 // When the label is bound, these instructions will be patched
1519 // with a multi-instruction mov sequence that will load the
1520 // destination register with the address of the label.
1521 //
1522 // target_at extracts the link and target_at_put patches the instructions.
1523 BlockTrampolinePoolScope block_trampoline_pool(this);
1524 emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
1525 emit(dst.code());
1526 DCHECK_GE(kMovInstructionsNoConstantPool, 2);
1527 for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
1528 }
1529 }
1530
1531
emit_label_addr(Label * label)1532 void Assembler::emit_label_addr(Label* label) {
1533 CheckBuffer();
1534 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
1535 int position = link(label);
1536 if (label->is_bound()) {
1537 // Keep internal references relative until EmitRelocations.
1538 dp(position);
1539 } else {
1540 // Encode internal reference to unbound label. We use a dummy opcode
1541 // such that it won't collide with any opcode that might appear in the
1542 // label's chain.
1543 int link = position - pc_offset();
1544 DCHECK_EQ(0, link & 3);
1545 link >>= 2;
1546 DCHECK(is_int26(link));
1547
1548 // When the label is bound, the instruction(s) will be patched
1549 // as a jump table entry containing the label address. target_at extracts
1550 // the link and target_at_put patches the instruction(s).
1551 BlockTrampolinePoolScope block_trampoline_pool(this);
1552 emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
1553 #if V8_TARGET_ARCH_PPC64
1554 nop();
1555 #endif
1556 }
1557 }
1558
1559
1560 // Special register instructions
crxor(int bt,int ba,int bb)1561 void Assembler::crxor(int bt, int ba, int bb) {
1562 emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
1563 }
1564
1565
creqv(int bt,int ba,int bb)1566 void Assembler::creqv(int bt, int ba, int bb) {
1567 emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
1568 }
1569
1570
mflr(Register dst)1571 void Assembler::mflr(Register dst) {
1572 emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11); // Ignore RC bit
1573 }
1574
1575
mtlr(Register src)1576 void Assembler::mtlr(Register src) {
1577 emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11); // Ignore RC bit
1578 }
1579
1580
mtctr(Register src)1581 void Assembler::mtctr(Register src) {
1582 emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11); // Ignore RC bit
1583 }
1584
1585
mtxer(Register src)1586 void Assembler::mtxer(Register src) {
1587 emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
1588 }
1589
1590
mcrfs(CRegister cr,FPSCRBit bit)1591 void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
1592 DCHECK_LT(static_cast<int>(bit), 32);
1593 int bf = cr.code();
1594 int bfa = bit / CRWIDTH;
1595 emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
1596 }


void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }


#if V8_TARGET_ARCH_PPC64
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif


// Exception-generating instructions and debugging support.
// Stops with a non-negative code less than kNumOfWatchedStops support
// enabling/disabling and a counter feature. See simulator-ppc.h.
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  if (cond != al) {
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}
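// Note: msg and code are not encoded in the emitted instruction stream;
// the stop reduces to a (possibly conditional) bkpt trap.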

void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
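// 0x7D821008 decodes as twge r2, r2 (trap word if greater than or equal),
// an always-taken trap that debuggers and the simulator treat as a
// breakpoint.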

void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}


void Assembler::sync() { emit(EXT2 | SYNC); }


void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }


void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}


void Assembler::isync() { emit(EXT1 | ISYNC); }
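// Taken together these primitives implement the conventional PowerPC
// instruction cache flush recipe; a sketch for one cache line addressed
// by rb (register choice here is illustrative only):
//   dcbf(r0, rb);   // push the modified data cache line to memory
//   sync();         // wait until the store has been performed
//   icbi(r0, rb);   // invalidate the stale instruction cache line
//   isync();        // discard any already-prefetched instructions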


// Floating point support

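// The scalar loads and stores below use the D-form layout
// (opcode | FRT | RA | 16-bit signed displacement). RA must not be r0,
// because the hardware reads RA = 0 as the literal value zero rather than
// register r0; hence the DCHECKs on ra below.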
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}


void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}


void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}


void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}


void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
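// fsel computes frt = (fra >= 0.0) ? frc : frb without branching.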


void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}


void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}


void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}


void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}


void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}


void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}

// Pseudo instructions.
void Assembler::nop(int type) {
  Register reg = r0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = r0;
      break;
    case GROUP_ENDING_NOP:
      reg = r2;
      break;
    case DEBUG_BREAK_NOP:
      reg = r3;
      break;
    default:
      UNIMPLEMENTED();
  }

  ori(reg, reg, Operand::Zero());
}
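// All three variants assemble to ori reg, reg, 0; only the register used
// distinguishes them, which is what IsNop below keys on (r0 = ordinary nop,
// r2 = group-ending marker, r3 = debug break marker).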


bool Assembler::IsNop(Instr instr, int type) {
  int reg = 0;
  switch (type) {
    case NON_MARKING_NOP:
      reg = 0;
      break;
    case GROUP_ENDING_NOP:
      reg = 2;
      break;
    case DEBUG_BREAK_NOP:
      reg = 3;
      break;
    default:
      UNIMPLEMENTED();
  }
  return instr == (ORI | reg * B21 | reg * B16);
}


void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
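  // For example, a 3 KB buffer grows to the 4 KB minimum, a 256 KB buffer
  // doubles to 512 KB, and a 2 MB buffer grows linearly to 3 MB.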
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }

  // Some internal data structures overflow for very large buffers;
  // callers must ensure that kMaximalBufferSize is not too large.
  if (desc.buffer_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}


void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}


void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}


void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (options().disable_reloc_info_for_patching) return;
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (RelocInfo::IsOnlyForSerializer(rmode) &&
       !options().record_reloc_info_for_serialization && !emit_debug_code())) {
    return;
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}


void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = reinterpret_cast<Address>(buffer_) + it->position();
    RelocInfo rinfo(pc, rmode, it->data(), nullptr);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
      Memory<Address>(pc) = reinterpret_cast<Address>(buffer_) + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
      set_target_address_at(pc, 0, reinterpret_cast<Address>(buffer_) + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}


void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}


void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit the trampoline pool once, we need to
    // prevent any further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit the jump, then we emit the trampoline pool.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
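// The pool emitted above has the shape (branch offsets in bytes):
//   b size+4   ; skip over the pool itself
//   b size     ; slot 0, initially branching to the end of the pool
//   ...
//   b 4        ; slot n-1
// Each slot is later retargeted (via target_at_put) when a branch whose
// destination is out of direct reach is routed through it.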

PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     byte* address, int instructions)
    : Assembler(options, address, instructions * kInstrSize + kGap) {
  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}

PatchingAssembler::~PatchingAssembler() {
  // Check that the code was patched as expected.
  DCHECK_EQ(pc_, buffer_ + buffer_size_ - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_ + buffer_size_);
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC