1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
2 // All Rights Reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions
6 // are met:
7 //
8 // - Redistributions of source code must retain the above copyright notice,
9 // this list of conditions and the following disclaimer.
10 //
11 // - Redistribution in binary form must reproduce the above copyright
12 // notice, this list of conditions and the following disclaimer in the
13 // documentation and/or other materials provided with the
14 // distribution.
15 //
16 // - Neither the name of Sun Microsystems or the names of contributors may
17 // be used to endorse or promote products derived from this software without
18 // specific prior written permission.
19 //
20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
31 // OF THE POSSIBILITY OF SUCH DAMAGE.
32
33 // The original source code covered by the above license above has been
34 // modified significantly by Google Inc.
35 // Copyright 2014 the V8 project authors. All rights reserved.
36
37 #include "src/codegen/ppc/assembler-ppc.h"
38
39 #if V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
40
41 #include "src/base/bits.h"
42 #include "src/base/cpu.h"
43 #include "src/codegen/macro-assembler.h"
44 #include "src/codegen/ppc/assembler-ppc-inl.h"
45 #include "src/codegen/string-constants.h"
46 #include "src/deoptimizer/deoptimizer.h"
47
48 namespace v8 {
49 namespace internal {
50
51 // Get the CPU features enabled by the build.
// Returns the set of CPU feature bits guaranteed by the build configuration.
// No features are implied at compile time on PPC.
static unsigned CpuFeaturesImpliedByCompiler() { return 0; }
56
// Determines the supported CPU features, combining the build-time set with
// (unless cross-compiling or simulating) runtime probing via base::CPU.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  // Default instruction-cache line size; may be refined from the OS below.
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // Detect whether frim instruction is supported (POWER5+)
  // For now we will just check for processors we know do not
  // support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.part() == base::CPU::PPC_POWER9) {
    supported_ |= (1u << MODULO);
  }
#if V8_TARGET_ARCH_PPC64
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
    supported_ |= (1u << VSX);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  // Prefer the OS-reported icache line size over the 128-byte default.
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  // The simulator models all optional features.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
  supported_ |= (1u << VSX);
  supported_ |= (1u << MODULO);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
111
PrintTarget()112 void CpuFeatures::PrintTarget() {
113 const char* ppc_arch = nullptr;
114
115 #if V8_TARGET_ARCH_PPC64
116 ppc_arch = "ppc64";
117 #else
118 ppc_arch = "ppc";
119 #endif
120
121 printf("target %s\n", ppc_arch);
122 }
123
// Prints each optional CPU feature flag and whether it is enabled (1/0).
void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
  printf("FPR_GPR_MOV=%d\n", CpuFeatures::IsSupported(FPR_GPR_MOV));
  printf("LWSYNC=%d\n", CpuFeatures::IsSupported(LWSYNC));
  printf("ISELECT=%d\n", CpuFeatures::IsSupported(ISELECT));
  printf("VSX=%d\n", CpuFeatures::IsSupported(VSX));
  printf("MODULO=%d\n", CpuFeatures::IsSupported(MODULO));
}
132
ToRegister(int num)133 Register ToRegister(int num) {
134 DCHECK(num >= 0 && num < kNumRegisters);
135 const Register kRegisters[] = {r0, sp, r2, r3, r4, r5, r6, r7,
136 r8, r9, r10, r11, ip, r13, r14, r15,
137 r16, r17, r18, r19, r20, r21, r22, r23,
138 r24, r25, r26, r27, r28, r29, r30, fp};
139 return kRegisters[num];
140 }
141
142 // -----------------------------------------------------------------------------
143 // Implementation of RelocInfo
144
// Relocation modes that must be fixed up when code moves: internal
// references point into the code object itself.
const int RelocInfo::kApplyMask =
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
148
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded. Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}
156
IsInConstantPool()157 bool RelocInfo::IsInConstantPool() {
158 if (FLAG_enable_embedded_constant_pool && constant_pool_ != kNullAddress) {
159 return Assembler::IsConstantPoolLoadStart(pc_);
160 }
161 return false;
162 }
163
// Returns the call tag for a WASM (stub) call site, i.e. the low 32 bits of
// the encoded target address at pc_.
uint32_t RelocInfo::wasm_call_tag() const {
  DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
  return static_cast<uint32_t>(
      Assembler::target_address_at(pc_, constant_pool_));
}
169
170 // -----------------------------------------------------------------------------
171 // Implementation of Operand and MemOperand
172 // See assembler-ppc-inl.h for inlined constructors
173
// Builds an immediate operand holding the (tagged) address of |handle|,
// relocated as a full embedded object.
Operand::Operand(Handle<HeapObject> handle) {
  rm_ = no_reg;
  value_.immediate = static_cast<intptr_t>(handle.address());
  rmode_ = RelocInfo::FULL_EMBEDDED_OBJECT;
}
179
// Builds an operand for |value|: a Smi immediate when it fits, otherwise a
// deferred heap-number request resolved at code-finalization time.
Operand Operand::EmbeddedNumber(double value) {
  int32_t smi;
  if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(value);
  return result;
}
188
// Builds an operand whose value is a deferred string-constant allocation,
// resolved in AllocateAndInstallRequestedHeapObjects().
Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
  Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
  result.is_heap_object_request_ = true;
  result.value_.heap_object_request = HeapObjectRequest(str);
  return result;
}
195
// Register + immediate-displacement addressing (D-form loads/stores).
MemOperand::MemOperand(Register rn, int32_t offset)
    : ra_(rn), offset_(offset), rb_(no_reg) {}
198
// Register + register indexed addressing (X-form loads/stores).
MemOperand::MemOperand(Register ra, Register rb)
    : ra_(ra), offset_(0), rb_(rb) {}
201
// Allocates every deferred heap object (heap numbers, string constants)
// recorded during assembly and patches its address into the code buffer at
// the recorded offset. |isolate| may be null only when no requests exist.
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
  DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
  for (auto& request : heap_object_requests_) {
    Handle<HeapObject> object;
    switch (request.kind()) {
      case HeapObjectRequest::kHeapNumber: {
        object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
            request.heap_number());
        break;
      }
      case HeapObjectRequest::kStringConstant: {
        const StringConstantBase* str = request.string();
        CHECK_NOT_NULL(str);
        object = str->AllocateStringConstant(isolate);
        break;
      }
    }
    // Patch the placeholder emitted at assembly time with the real address.
    // No icache flush needed: the code object has not been executed yet.
    Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
    Address constant_pool = kNullAddress;
    set_target_address_at(pc, constant_pool, object.address(),
                          SKIP_ICACHE_FLUSH);
  }
}
225
226 // -----------------------------------------------------------------------------
227 // Specific instructions, constants, and masks.
228
// Constructs an Assembler over |buffer|, resetting all trampoline and
// constant-pool bookkeeping to its initial state.
Assembler::Assembler(const AssemblerOptions& options,
                     std::unique_ptr<AssemblerBuffer> buffer)
    : AssemblerBase(options, std::move(buffer)),
      scratch_register_list_(ip.bit()),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  // Relocation info is written from the end of the buffer backwards.
  reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  // With --force-long-branches, pretend the trampoline was already emitted
  // so all branches are generated in long form.
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  relocations_.reserve(128);
}
247
// Finalizes the generated code: emits the constant pool, relocations and
// code comments, resolves deferred heap objects, and fills in |desc| with
// the offsets of each metadata section.
void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
                        SafepointTableBuilder* safepoint_table_builder,
                        int handler_table_offset) {
  // As a crutch to avoid having to add manual Align calls wherever we use a
  // raw workflow to create Code objects (mostly in tests), add another Align
  // call here. It does no harm - the end of the Code object is aligned to the
  // (larger) kCodeAlignment anyways.
  // TODO(jgruber): Consider moving responsibility for proper alignment to
  // metadata table builders (safepoint, handler, constant pool, code
  // comments).
  DataAlign(Code::kMetadataAlignment);

  // Emit constant pool if necessary.
  int constant_pool_size = EmitConstantPool();

  EmitRelocations();

  int code_comments_size = WriteCodeComments();

  AllocateAndInstallRequestedHeapObjects(isolate);

  // Set up code descriptor.
  // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
  // this point to make CodeDesc initialization less fiddly.

  // Metadata sections are laid out back-to-front from the end of the
  // instruction stream: ... | constant pool | code comments |
  const int instruction_size = pc_offset();
  const int code_comments_offset = instruction_size - code_comments_size;
  const int constant_pool_offset = code_comments_offset - constant_pool_size;
  const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
                                        ? constant_pool_offset
                                        : handler_table_offset;
  const int safepoint_table_offset =
      (safepoint_table_builder == kNoSafepointTable)
          ? handler_table_offset2
          : safepoint_table_builder->GetCodeOffset();
  const int reloc_info_offset =
      static_cast<int>(reloc_info_writer.pos() - buffer_->start());
  CodeDesc::Initialize(desc, this, safepoint_table_offset,
                       handler_table_offset2, constant_pool_offset,
                       code_comments_offset, reloc_info_offset);
}
289
Align(int m)290 void Assembler::Align(int m) {
291 DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
292 DCHECK_EQ(pc_offset() & (kInstrSize - 1), 0);
293 while ((pc_offset() & (m - 1)) != 0) {
294 nop();
295 }
296 }
297
// Aligns the pc to an 8-byte boundary for code targets.
void Assembler::CodeTargetAlign() { Align(8); }
299
GetCondition(Instr instr)300 Condition Assembler::GetCondition(Instr instr) {
301 switch (instr & kCondMask) {
302 case BT:
303 return eq;
304 case BF:
305 return ne;
306 default:
307 UNIMPLEMENTED();
308 }
309 return al;
310 }
311
// True for "lis" (load immediate shifted): addis with ra == r0.
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr) == r0;
}
315
// True for "li" (load immediate): addi with ra == r0.
bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr) == r0;
}
319
// True for an addic (add immediate carrying) instruction.
bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
321
// True for an ori (OR immediate) instruction.
bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
323
// True for a conditional branch (bc, primary opcode BCX).
bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
325
// Extracts the RA register field of |instr|.
Register Assembler::GetRA(Instr instr) {
  return Register::from_code(Instruction::RAValue(instr));
}
329
// Extracts the RB register field of |instr|.
Register Assembler::GetRB(Instr instr) {
  return Register::from_code(Instruction::RBValue(instr));
}
333
334 #if V8_TARGET_ARCH_PPC64
335 // This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
// Returns whether the five instructions form the fixed 64-bit
// load-constant-into-r12 sequence. Only the opcode/register halves are
// compared; the immediate halves (shown below as examples) may vary.
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C) &&
          (instr3 == 0x798C07C6) && ((instr4 >> 16) == 0x658C) &&
          ((instr5 >> 16) == 0x618C));
}
348 #else
349 // This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
// Returns whether the two instructions form the fixed 32-bit
// load-constant-into-r12 sequence (lis/ori); immediates may vary.
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori     r12, r12, 20480
  return (((instr1 >> 16) == 0x3D80) && ((instr2 >> 16) == 0x618C));
}
356 #endif
357
// True for a register-register compare (cmp, EXT2 extended opcode).
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}
362
// True for rlwinm (rotate left word immediate then AND with mask).
bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}
366
// True for andi. (AND immediate, which always sets CR0).
bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
368
369 #if V8_TARGET_ARCH_PPC64
// True for rldicl (rotate left doubleword immediate then clear left).
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
374 #endif
375
// True for a compare-immediate instruction (cmpi).
bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}
379
// True for crset, encoded as creqv (EXT1 extended opcode CREQV).
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}
384
// Returns the register operand (RA) of a cmpi instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}
389
// Returns the raw (unsign-extended) 16-bit immediate of a cmpi instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
394
395 // Labels refer to positions in the (to be) generated code.
396 // There are bound, linked, and unused labels.
397 //
398 // Bound labels refer to known positions in the already
399 // generated code. pos() is the position the label refers to.
400 //
401 // Linked labels refer to unknown positions in the code
402 // to be generated; pos() is the position of the last
403 // instruction using the label.
404
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;

// Dummy opcodes for unbound label mov instructions or jump table entries.
// They occupy the primary-opcode field (bits 26..31) of a placeholder
// instruction; target_at()/target_at_put() dispatch on them to decide how a
// linked-but-unbound reference must be patched once the label is bound.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundAddLabelLongOffsetOpcode = 2 << 26,
  kUnboundMovLabelAddrOpcode = 3 << 26,
  kUnboundJumpTableEntryOpcode = 4 << 26
};
416
// Returns the target position encoded at |pos|, following the label link
// chain: a self-reference (zero offset) marks the end of the chain.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      // Placeholder encodings store the link as a 26-bit word offset.
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  // A zero link means the instruction references itself: end of chain.
  if (link == 0) return kEndOfChain;
  return pos + link;
}
447
// Patches the instruction (or placeholder sequence) at |pos| so that it
// refers to |target_pos|. If |is_branch| is non-null it is set to whether
// the patched instruction was a real branch (b/bc).
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Keep the AA/LK bits, replace the 26-bit offset.
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Keep the AA/LK bits, replace the 16-bit offset.
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_start_ + pos), 2);
      patcher.bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelLongOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      // The second word packs dst/base register codes and (for the short
      // form) a 22-bit immediate; the long form stores the immediate in a
      // third word.
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 27) & 0x1F);
      Register base = Register::from_code((operands >> 22) & 0x1F);
      int32_t delta = (opcode == kUnboundAddLabelLongOffsetOpcode)
                          ? static_cast<int32_t>(instr_at(pos + 2 * kInstrSize))
                          : (SIGN_EXT_IMM22(operands & kImm22Mask));
      int32_t offset = target_pos + delta;
      PatchingAssembler patcher(
          options(), reinterpret_cast<byte*>(buffer_start_ + pos),
          2 + static_cast<int32_t>(opcode == kUnboundAddLabelLongOffsetOpcode));
      patcher.bitwise_add32(dst, base, offset);
      if (opcode == kUnboundAddLabelLongOffsetOpcode) patcher.nop();
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                kMovInstructionsNoConstantPool);
      // Keep internal references relative until EmitRelocations.
      patcher.bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      PatchingAssembler patcher(options(),
                                reinterpret_cast<byte*>(buffer_start_ + pos),
                                kSystemPointerSize / kInstrSize);
      // Keep internal references relative until EmitRelocations.
      patcher.dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}
533
max_reach_from(int pos)534 int Assembler::max_reach_from(int pos) {
535 Instr instr = instr_at(pos);
536 uint32_t opcode = instr & kOpcodeMask;
537
538 // check which type of branch this is 16 or 26 bit offset
539 switch (opcode) {
540 case BX:
541 return 26;
542 case BCX:
543 return 16;
544 case kUnboundMovLabelOffsetOpcode:
545 case kUnboundAddLabelOffsetOpcode:
546 case kUnboundMovLabelAddrOpcode:
547 case kUnboundJumpTableEntryOpcode:
548 return 0; // no limit on reach
549 }
550
551 DCHECK(false);
552 return 0;
553 }
554
// Binds label |L| to code position |pos|, patching every instruction on its
// link chain. Links whose branch reach is too short are redirected through a
// shared trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      // Out of direct reach: route this link through a trampoline slot,
      // allocated at most once per bind.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK_NE(trampoline_pos, kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  // A resolved branch no longer needs a future trampoline slot.
  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
585
// Binds |L| to the current code position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
590
// Advances |L| to the next entry of its link chain, or marks it unused when
// the chain is exhausted.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK_GE(link, 0);
    L->link_to(link);
  }
}
601
is_near(Label * L,Condition cond)602 bool Assembler::is_near(Label* L, Condition cond) {
603 DCHECK(L->is_bound());
604 if (L->is_bound() == false) return false;
605
606 int maxReach = ((cond == al) ? 26 : 16);
607 int offset = L->pos() - pc_offset();
608
609 return is_intn(offset, maxReach);
610 }
611
// Emits an A-form FP instruction: opcode | frt | fra | frb | Rc.
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}
616
// Emits a D-form instruction: opcode | rt | ra | 16-bit immediate.
// |signed_disp| selects whether |val| must fit in int16 or uint16;
// out-of-range values are printed for diagnosis before the CHECK aborts.
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
634
// Emits an XO-form instruction: opcode | rt | ra | rb | OE | Rc.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}
639
md_form(Instr instr,Register ra,Register rs,int shift,int maskbit,RCBit r)640 void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
641 int maskbit, RCBit r) {
642 int sh0_4 = shift & 0x1F;
643 int sh5 = (shift >> 5) & 0x1;
644 int m0_4 = maskbit & 0x1F;
645 int m5 = (maskbit >> 5) & 0x1;
646
647 emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
648 m5 * B5 | sh5 * B1 | r);
649 }
650
mds_form(Instr instr,Register ra,Register rs,Register rb,int maskbit,RCBit r)651 void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
652 int maskbit, RCBit r) {
653 int m0_4 = maskbit & 0x1F;
654 int m5 = (maskbit >> 5) & 0x1;
655
656 emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
657 m5 * B5 | r);
658 }
659
660 // Returns the next free trampoline entry.
get_trampoline_entry()661 int32_t Assembler::get_trampoline_entry() {
662 int32_t trampoline_entry = kInvalidSlotPos;
663
664 if (!internal_trampoline_exception_) {
665 trampoline_entry = trampoline_.take_slot();
666
667 if (kInvalidSlotPos == trampoline_entry) {
668 internal_trampoline_exception_ = true;
669 }
670 }
671 return trampoline_entry;
672 }
673
// Records a use of label |L| at the current pc and returns the position the
// emitted instruction should encode: the label's target when bound, the
// previous link when already linked, or the current pc itself for a first
// (self-referencing) link.
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
693
694 // Branch instructions.
695
// Branch conditional to link register (bclr). |bo| encodes the branch
// semantics, |condition_bit| selects the CR bit, |lk| optionally sets LR.
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}
699
// Branch conditional to count register (bcctr).
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}
703
// Pseudo op - branch to link register (unconditional, LR unchanged).
void Assembler::blr() { bclr(BA, 0, LeaveLK); }
706
// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }
709
// Pseudo op - branch to count register, setting LR (indirect call).
void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
711
// Conditional branch with a signed 16-bit byte offset (must be
// instruction-aligned: the low AA/LK bits of the offset must be clear).
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}
717
// Unconditional branch with a signed 26-bit byte offset (must be
// instruction-aligned: the low AA/LK bits of the offset must be clear).
void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
723
// XOR immediate: dst = src ^ imm (unsigned 16-bit immediate).
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.immediate(), false);
}
727
// XOR immediate shifted: ra = rs ^ (imm << 16).
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.immediate(), false);
}
731
// Rotate left word immediate then AND with mask: ra = rotl32(rs, sh) masked
// from bit mb through bit me. Field values are truncated to 5 bits.
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
740
// Rotate left word by register then AND with mask: like rlwinm but the
// rotate amount comes from rb.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}
748
// Rotate left word immediate then mask insert: inserts the masked rotated
// bits of rs into ra, preserving ra's bits outside the mask.
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1F;
  mb &= 0x1F;
  me &= 0x1F;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
757
// Pseudo op: shift left word immediate, encoded as rlwinm.
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, val.immediate(), 0, 31 - val.immediate(), rc);
}
762
// Pseudo op: shift right word immediate (logical), encoded as rlwinm.
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 32 - val.immediate(), val.immediate(), 31, rc);
}
767
// Pseudo op: clear the low (rightmost) val bits, encoded as rlwinm.
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.immediate(), rc);
}
773
// Pseudo op: clear the high (leftmost) val bits, encoded as rlwinm.
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.immediate()) && (val.immediate() >= 0));
  rlwinm(dst, src, 0, val.immediate(), 31, rc);
}
779
// Pseudo op: rotate left word by register, encoded as rlwnm with full mask.
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}
783
// Pseudo op: rotate left word immediate, encoded as rlwinm with full mask.
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}
787
// Pseudo op: rotate right word immediate, i.e. rotate left by (32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
791
// Pseudo op: subtract immediate, encoded as addi with negated immediate.
void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.immediate())));
}
795
// Add carrying: dst = src1 + src2 (XO-form; o/r select the OE/Rc bits).
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}
800
// Add extended (with carry in): dst = src1 + src2 + CA.
void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}
805
// Add to zero extended: dst = src1 + CA. Emitted directly because this
// XO-form variant has no RB operand.
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}
810
// Subtract: dst = src1 - src2. Note the swapped operands: subf computes
// RB - RA, so src2 is passed as RA and src1 as RB.
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}
815
// Subtract carrying: dst = src1 - src2 (operands swapped for subfc).
void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}
820
// Subtract extended (with carry in): operands swapped for subfe.
void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}
825
// Subtract from immediate carrying: dst = imm - src (signed 16-bit imm).
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.immediate(), true);
}
829
// Add: dst = src1 + src2 (XO-form; o/r select the OE/Rc bits).
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
834
// Multiply low word: dst = low 32 bits of src1 * src2.
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}
840
// Multiply high word (signed); this form has no OE bit.
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}
845
// Multiply high word unsigned; this form has no OE bit.
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
850
851 // Divide word
divw(Register dst,Register src1,Register src2,OEBit o,RCBit r)852 void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
853 RCBit r) {
854 xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
855 }
856
857 // Divide word unsigned
divwu(Register dst,Register src1,Register src2,OEBit o,RCBit r)858 void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
859 RCBit r) {
860 xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
861 }
862
// Add immediate: dst = src + imm. r0 is rejected because the hardware
// reads the RA slot as literal zero, which li() expresses explicitly.
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use li instead to show intent
  d_form(ADDI, dst, src, imm.immediate(), true);
}

// Add immediate shifted: dst = src + (imm << 16).
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(src != r0);  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.immediate(), true);
}

// Add immediate carrying: like addi, additionally setting XER[CA].
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.immediate(), true);
}
876
// Logical immediates. Note the operand order: the source register goes in
// the first d_form slot (RS field) and the target in the second (RA field),
// because these instructions store their result into ra.
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.immediate(), false);
}

void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.immediate(), false);
}

void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.immediate(), false);
}

void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.immediate(), false);
}
892
// Compare immediate (signed). The L bit selects the operand width:
// a full-register (64-bit) compare on PPC64, 32-bit otherwise.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
905
// Compare logical immediate (unsigned). See cmpi for the L bit.
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
918
// Compare word immediate: a signed 32-bit compare (L = 0 even on PPC64).
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.immediate();
  int L = 0;  // L = 0 selects the 32-bit compare.
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
935
// Compare logical word immediate: an unsigned 32-bit compare (L = 0).
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.immediate();
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
944
// Integer select: rt = (CR bit cb is set) ? ra : rb.
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
949
950 // Pseudo op - load immediate
void Assembler::li(Register dst, const Operand& imm) {
  // addi with ra = r0: the RA slot reads as literal zero, so this simply
  // loads the sign-extended 16-bit immediate.
  d_form(ADDI, dst, r0, imm.immediate(), true);
}

void Assembler::lis(Register dst, const Operand& imm) {
  // addis with ra = r0: loads imm << 16.
  d_form(ADDIS, dst, r0, imm.immediate(), true);
}
958
959 // Pseudo op - move register
// Register-to-register copy.
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
964
// D-form loads. r0 is disallowed as the base register because the
// hardware reads an RA slot of r0 as literal zero, not the register.

// Load byte and zero-extend.
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}

// Load halfword and zero-extend.
void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}

// Load word and zero-extend.
void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}

// Load word with update: also writes the effective address back to ra.
void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}

// Load halfword algebraic (sign-extend).
void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(LHA, dst, src.ra(), src.offset(), true);
}

// Load word algebraic: sign-extending word load on PPC64 (DS-form with
// variant bits = 2); plain lwz on 32-bit where no extension is needed.
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  // DS-form: offset must be 4-byte aligned since its low 2 bits encode
  // the instruction variant.
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}
1001
// D-form stores; see the loads above for why r0 is rejected as base.

// Store byte.
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STB, dst, src.ra(), src.offset(), true);
}

// Store halfword.
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STH, dst, src.ra(), src.offset(), true);
}

// Store word.
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STW, dst, src.ra(), src.offset(), true);
}

// Store word with update: also writes the effective address back to ra.
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(src.ra_ != r0);
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
1021
// Negate: rt = -ra.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
1025
1026 #if V8_TARGET_ARCH_PPC64
1027 // 64bit specific instructions
// DS-form doubleword accesses. The 16-bit displacement must be 4-byte
// aligned because its low two bits encode the instruction variant
// (| 1 selects the update form).

// Load doubleword.
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}

// Load doubleword with update (writes effective address back to ra).
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}

// Store doubleword.
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}

// Store doubleword with update (writes effective address back to ra).
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(src.ra_ != r0);
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
1059
// Rotate-left-doubleword immediate family (MD/MDS-form): rotate rs left
// by sh (or rb) and AND with a mask described by mb/me.

// Rotate left immediate then clear (mask from mb to 63 - sh).
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}

// Rotate left immediate then clear left (mask from mb to 63).
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}

// Rotate left (by register rb) then clear left.
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}

// Rotate left immediate then clear right (mask from 0 to me).
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
1075
// 64-bit shift/clear pseudo ops, expressed as rotate-and-mask.

// Shift left doubleword immediate: rldicr(sh = n, me = 63 - n).
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, val.immediate(), 63 - val.immediate(), rc);
}

// Shift right doubleword immediate: rldicl(sh = 64 - n, mb = n).
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 64 - val.immediate(), val.immediate(), rc);
}

// Clear the low n bits: rldicr with no rotation, me = 63 - n.
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicr(dst, src, 0, 63 - val.immediate(), rc);
}

// Clear the high n bits: rldicl with no rotation, mb = n.
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.immediate()) && (val.immediate() >= 0));
  rldicl(dst, src, 0, val.immediate(), rc);
}
1097
// Rotate left doubleword immediate then mask insert: inserts the rotated
// rs into ra under the mask starting at mb.
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
1101
// Shift right algebraic doubleword immediate. The 6-bit shift amount is
// split across the encoding: low five bits in the sh field, the sixth
// in bit 1.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1F;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}
1109
// Rotate pseudo ops: plain rotates are rotate-and-mask with a full mask.

// Rotate left doubleword by register amount.
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}

// Rotate left doubleword by immediate.
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}

// Rotate right doubleword by immediate (left rotate by 64 - sh).
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}
1121
// Multiply low doubleword.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}

// Divide doubleword (signed).
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}

// Divide doubleword unsigned.
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
1136 #endif
1137
// Returns the number of instructions mov(dst, src) would emit for this
// operand, accounting for constant-pool access and overflow.
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      // Overflowed entries need an extra addis to form the address.
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
1151
// Decides whether mov(dst, src) should load the value from the embedded
// constant pool rather than synthesizing it with an immediate sequence.
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }
  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  // Disallow the pool-overflow sequence when a short immediate form is
  // viable, or when dst is r0 (r0 cannot serve as the base register of
  // the overflow access sequence).
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst == r0);
#else
  bool allowOverflow = !(canOptimize || dst == r0);
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
1177
EnsureSpaceFor(int space_needed)1178 void Assembler::EnsureSpaceFor(int space_needed) {
1179 if (buffer_space() <= (kGap + space_needed)) {
1180 GrowBuffer(space_needed);
1181 }
1182 }
1183
// Returns true if emitting this operand requires relocation info to be
// recorded alongside the generated code.
bool Operand::must_output_reloc_info(const Assembler* assembler) const {
  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    if (assembler != nullptr && assembler->predictable_code_size()) return true;
    // NOTE(review): assembler is dereferenced here without a null check,
    // even though the line above tolerates nullptr; presumably callers
    // always pass a valid assembler -- confirm.
    return assembler->options().record_reloc_info_for_serialization;
  } else if (RelocInfo::IsNone(rmode_)) {
    return false;
  }
  return true;
}
1193
1194 // Primarily used for loading constants
1195 // This should really move to be in macro-assembler as it
1196 // is really a pseudo instruction
1197 // Some usages of this intend for a FIXED_SEQUENCE to be used
1198 // Todo - break this dependency so we can optimize mov() in general
1199 // and only use the generic version when we require a fixed sequence
// Loads an arbitrary immediate (or heap-object placeholder) into dst.
// Three strategies, in order of preference:
//  1. constant-pool load (when available and profitable),
//  2. shortest immediate sequence (li / lis+ori / full 64-bit build),
//  3. fixed-length bitwise_mov (required for relocatable values so the
//     sequence can be patched later).
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value;
  if (src.IsHeapObjectRequest()) {
    // Placeholder value; the real object is filled in later.
    RequestHeapObject(src.heap_object_request());
    value = 0;
  } else {
    value = src.immediate();
  }
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  // A variable-length sequence is only allowed for non-relocatable values;
  // with the trampoline pool blocked, only a single li may shrink the code.
  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (!src.IsHeapObjectRequest() &&
      use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      // Overflowed entry: form the high part of the address first.
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        // Build the upper 32 bits, shift them into place, then OR in the
        // lower halves, skipping any halfword that is zero.
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xFFFF);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xFFFF);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xFFFF);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  // Fall back to the fixed-length, patchable sequence.
  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
1280
// Loads |value| into dst using a FIXED-length instruction sequence
// (5 instructions on PPC64, 2 on 32-bit), 16 bits at a time, so the
// sequence can be located and patched later.
void Assembler::bitwise_mov(Register dst, intptr_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
  int32_t hi_32 = static_cast<int32_t>(value >> 32);
  int32_t lo_32 = static_cast<int32_t>(value);
  int hi_word = static_cast<int>(hi_32 >> 16);
  int lo_word = static_cast<int>(hi_32 & 0xFFFF);
  // High 32 bits first, then shift into place and OR in the low halves.
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
  sldi(dst, dst, Operand(32));
  hi_word = static_cast<int>(((lo_32 >> 16) & 0xFFFF));
  lo_word = static_cast<int>(lo_32 & 0xFFFF);
  oris(dst, dst, Operand(hi_word));
  ori(dst, dst, Operand(lo_word));
#else
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
#endif
}
1302
// Loads a 32-bit value into dst with a fixed two-instruction
// (lis + ori) sequence.
void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xFFFF);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}
1310
// Adds a 32-bit immediate to src using a fixed two-instruction sequence
// (nop-padded in the short case) -- presumably kept fixed so label
// patching can rewrite it; see add_label_offset.
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xFFFF);
    // addic sign-extends lo_word, so when the low half will read as
    // negative the high half is pre-incremented to compensate.
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
1324
// Loads into dst the offset of |label| from the start of the generated
// Code object. For unbound labels a dummy two-word record is emitted and
// patched when the label is bound.
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
1350
// Computes dst = base + offset-of-label + delta. For unbound labels a
// dummy record is emitted (two or three words depending on whether delta
// fits in 22 bits) and patched once the label is bound.
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    BlockTrampolinePoolScope block_trampoline_pool(this);

    emit((is_int22(delta) ? kUnboundAddLabelOffsetOpcode
                          : kUnboundAddLabelLongOffsetOpcode) |
         (link & kImm26Mask));
    emit(dst.code() * B27 | base.code() * B22 | (delta & kImm22Mask));

    // Wide delta does not fit in the second word; emit it separately.
    if (!is_int22(delta)) {
      emit(delta);
    }
  }
}
1378
// Loads the address of |label| into dst. Records an encoded internal
// reference; unbound labels get a fixed-size dummy sequence (padded with
// nops) that is patched when the label binds.
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain. Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK_GE(kMovInstructionsNoConstantPool, 2);
    // Pad so the patched mov sequence fits exactly.
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
1407
// Emits a pointer-sized data word holding the address of |label|
// (e.g. a jump-table entry); patched when the label binds.
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address. target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    // A 64-bit entry occupies two instruction slots.
    nop();
#endif
  }
}
1434
1435 // Special register instructions
// Condition-register bit XOR: CR[bt] = CR[ba] ^ CR[bb].
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}

// Condition-register bit equivalence: CR[bt] = ~(CR[ba] ^ CR[bb]).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}
1443
// Special-purpose register moves. The SPR number is encoded with its two
// 5-bit halves swapped: 256 = 8 << 5 (LR), 288 = 9 << 5 (CTR),
// 32 = 1 << 5 (XER).
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}

void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}

void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}

void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}
1459
// Copies the FPSCR field containing |bit| into condition register
// field |cr|.
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;  // FPSCR field index holding |bit|.
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}
1466
// Move condition register to GPR.
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
1468
1469 #if V8_TARGET_ARCH_PPC64
// Direct GPR <-> FP register moves (VSX move-from/to-VSR instructions).

// Move FP register doubleword to GPR.
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}

// Move FP register word to GPR, zero-extended.
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}

// Move GPR doubleword to FP register.
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}

// Move GPR word to FP register, zero-extended.
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}

// Move GPR word to FP register, sign-extended (algebraic).
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
1489 #endif
1490
1491 // Exception-generating instructions and debugging support.
1492 // Stops with a non-negative code less than kNumOfWatchedStops support
1493 // enabling/disabling and a counter feature. See simulator-ppc.h .
stop(Condition cond,int32_t code,CRegister cr)1494 void Assembler::stop(Condition cond, int32_t code, CRegister cr) {
1495 if (cond != al) {
1496 Label skip;
1497 b(NegateCondition(cond), &skip, cr);
1498 bkpt(0);
1499 bind(&skip);
1500 } else {
1501 bkpt(0);
1502 }
1503 }
1504
// Emits a breakpoint trap. imm16 is currently ignored; a fixed trap
// instruction (0x7D821008, twge r2, r2) is always emitted.
void Assembler::bkpt(uint32_t imm16) { emit(0x7D821008); }
1506
// Data cache block flush at EA = ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
1510
// Heavyweight memory barrier (sync).
void Assembler::sync() { emit(EXT2 | SYNC); }

// Lightweight memory barrier (sync with L = 1).
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
1514
// Instruction cache block invalidate at EA = ra + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}
1518
// Instruction synchronize (context-synchronizing barrier).
void Assembler::isync() { emit(EXT1 | ISYNC); }
1520
1521 // Floating point support
1522
// FP loads (D-form). r0 as base means literal zero, hence the DCHECK.

// Load floating-point double.
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}

// Load floating-point double with update (EA written back to ra).
void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(ra != r0);
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}

// Load floating-point single.
void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}

// Load floating-point single with update.
void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}
1562
// FP stores (D-form); mirrors the FP loads above.

// Store floating-point double.
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}

// Store floating-point double with update (EA written back to ra).
void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}

// Store floating-point single.
void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}

// Store floating-point single with update.
void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(ra != r0);
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}
1602
// FP arithmetic (A-form): frt = fra op frb.

void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}

void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}

// fmul takes its second operand in the FRC field (bits 6-10),
// not FRB, so it is encoded directly rather than via a_form.
void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}

void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
1623
// FP compare unordered: compares fra with frb into CR field |cr|.
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}
1629
// FP register-to-register copy.
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}
1634
// Convert to integer word, round toward zero (truncate).
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}

// Convert to integer word using the current rounding mode.
void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}

// Convert to unsigned integer word, round toward zero.
void Assembler::fctiwuz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWUZ | frt.code() * B21 | frb.code() * B11);
}
1646
frin(const DoubleRegister frt,const DoubleRegister frb,RCBit rc)1647 void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
1648 RCBit rc) {
1649 emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
1650 }
1651
// Floating Round to Integer toward Zero (truncate): frt <- trunc(frb).
void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}
1656
// Floating Round to Integer Plus (toward +infinity): frt <- ceil(frb).
void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}
1661
// Floating Round to Integer Minus (toward -infinity): frt <- floor(frb).
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
1666
// Floating Round to Single Precision: frt <- (float)frb.
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}
1671
// Floating Convert From Integer Doubleword: signed 64-bit int -> double.
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}
1676
// Floating Convert From Integer Doubleword Unsigned: u64 -> double.
void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
1681
// Floating Convert From Integer Doubleword Unsigned Single: u64 -> float.
// Note the EXT3 primary opcode (single-precision group), unlike fcfidu.
void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}
1686
// Floating Convert From Integer Doubleword Single: signed 64-bit int -> float.
// EXT3 primary opcode (single-precision group), like fcfidus.
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}
1691
// Floating Convert To Integer Doubleword (current rounding mode).
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}
1696
// Floating Convert To Integer Doubleword, rounding toward Zero.
void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}
1701
// Floating Convert To Integer Doubleword Unsigned (current rounding mode).
void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
1706
// Floating Convert To Integer Doubleword Unsigned, rounding toward Zero.
void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}
1711
// Floating Select: frt <- (fra >= 0.0) ? frc : frb.
// A-form: fra at bit 16, frb at bit 11, frc at bit 6.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
1718
// Floating Negate: frt <- -frb.
void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
1723
// Move To FPSCR Bit 0: clears FPSCR bit |bit|. The bit number occupies a
// 5-bit field at bit 21, hence the < 32 check.
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}
1729
// Move To FPSCR Bit 1: sets FPSCR bit |bit|. Same 5-bit field constraint
// as mtfsb0.
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK_LT(static_cast<int>(bit), 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}
1735
mtfsfi(int bf,int immediate,RCBit rc)1736 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
1737 emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
1738 }
1739
// Move From FPSCR: copies the FPSCR contents into frt.
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}
1743
// Move To FPSCR Fields: updates FPSCR from frb. FLM (8-bit mask at bit 17)
// selects which FPSCR fields are written; L (bit 25) and W (bit 16) are the
// extended-form control bits from the ISA encoding.
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}
1748
// Floating Square Root: frt <- sqrt(frb).
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}
1753
// Floating Absolute Value: frt <- |frb|.
void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}
1758
// Floating Multiply-Add: frt <- (fra * frc) + frb (fused, A-form).
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
1765
// Floating Multiply-Subtract: frt <- (fra * frc) - frb (fused, A-form).
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
1772
1773 // Vector instructions
// Move From VSR Doubleword: ra <- doubleword 0 of VSR rs.
// SX = 1 extends the 5-bit register field: Simd128 (vector) registers map to
// the upper half of the unified VSX register file.
void Assembler::mfvsrd(const Register ra, const Simd128Register rs) {
  int SX = 1;
  emit(MFVSRD | rs.code() * B21 | ra.code() * B16 | SX);
}
1778
// Move From VSR Word and Zero: ra <- zero-extended word from VSR rs.
// SX = 1 selects the vector half of the VSX file (see mfvsrd).
void Assembler::mfvsrwz(const Register ra, const Simd128Register rs) {
  int SX = 1;
  emit(MFVSRWZ | rs.code() * B21 | ra.code() * B16 | SX);
}
1783
// Move To VSR Doubleword: VSR rt <- GPR ra.
// TX = 1 selects the vector half of the VSX file for the target.
void Assembler::mtvsrd(const Simd128Register rt, const Register ra) {
  int TX = 1;
  emit(MTVSRD | rt.code() * B21 | ra.code() * B16 | TX);
}
1788
// Move To VSR Double Doubleword: packs GPRs ra and rb into the 128-bit
// VSR rt. TX = 1 selects the vector half of the VSX file.
void Assembler::mtvsrdd(const Simd128Register rt, const Register ra,
                        const Register rb) {
  int TX = 1;
  emit(MTVSRDD | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | TX);
}
1794
// Load VSX Vector: rt <- 128 bits at address ra + rb (indexed form taken
// from |src|). TX = 1 selects the vector half of the VSX file.
void Assembler::lxvd(const Simd128Register rt, const MemOperand& src) {
  int TX = 1;
  emit(LXVD | rt.code() * B21 | src.ra().code() * B16 | src.rb().code() * B11 |
       TX);
}
1800
// Store VSX Vector: 128 bits of rt -> address ra + rb (indexed form taken
// from |dst|). SX = 1 selects the vector half of the VSX file.
void Assembler::stxvd(const Simd128Register rt, const MemOperand& dst) {
  int SX = 1;
  emit(STXVD | rt.code() * B21 | dst.ra().code() * B16 | dst.rb().code() * B11 |
       SX);
}
1806
// VSX Vector Splat Immediate Byte: replicates the immediate byte into every
// byte lane of rt. TX = 1 selects the vector half of the VSX file.
// NOTE(review): the immediate is placed unchecked into the 8-bit field at
// bit 11 — presumably callers guarantee 0..255; consider a DCHECK.
void Assembler::xxspltib(const Simd128Register rt, const Operand& imm) {
  int TX = 1;
  emit(XXSPLTIB | rt.code() * B21 | imm.immediate() * B11 | TX);
}
1811
1812 // Pseudo instructions.
nop(int type)1813 void Assembler::nop(int type) {
1814 Register reg = r0;
1815 switch (type) {
1816 case NON_MARKING_NOP:
1817 reg = r0;
1818 break;
1819 case GROUP_ENDING_NOP:
1820 reg = r2;
1821 break;
1822 case DEBUG_BREAK_NOP:
1823 reg = r3;
1824 break;
1825 default:
1826 UNIMPLEMENTED();
1827 }
1828
1829 ori(reg, reg, Operand::Zero());
1830 }
1831
IsNop(Instr instr,int type)1832 bool Assembler::IsNop(Instr instr, int type) {
1833 int reg = 0;
1834 switch (type) {
1835 case NON_MARKING_NOP:
1836 reg = 0;
1837 break;
1838 case GROUP_ENDING_NOP:
1839 reg = 2;
1840 break;
1841 case DEBUG_BREAK_NOP:
1842 reg = 3;
1843 break;
1844 default:
1845 UNIMPLEMENTED();
1846 }
1847 return instr == (ORI | reg * B21 | reg * B16);
1848 }
1849
// Grows the assembly buffer so at least |needed| more bytes fit, preserving
// both the emitted instructions (written forward from the start) and the
// relocation info (written backward from the end).
void Assembler::GrowBuffer(int needed) {
  DCHECK_EQ(buffer_start_, buffer_->start());

  // Compute new buffer size: double the buffer but cap the growth step at
  // 1 MB, then top up if that still leaves less free space than |needed|.
  int old_size = buffer_->size();
  int new_size = std::min(2 * old_size, old_size + 1 * MB);
  int space = buffer_space() + (new_size - old_size);
  new_size += (space < needed) ? needed - space : 0;

  // Some internal data structures overflow for very large buffers,
  // they must ensure that kMaximalBufferSize is not too large.
  if (new_size > kMaximalBufferSize) {
    V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
  }

  // Set up new buffer.
  std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
  DCHECK_EQ(new_size, new_buffer->size());
  byte* new_start = new_buffer->start();

  // Copy the data. Instructions keep their offset from the buffer start
  // (pc_delta); reloc info keeps its offset from the buffer end (rc_delta).
  intptr_t pc_delta = new_start - buffer_start_;
  intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
  size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
  MemMove(new_start, buffer_start_, pc_offset());
  MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          reloc_size);

  // Switch buffers and shift all cursors by the corresponding delta.
  buffer_ = std::move(new_buffer);
  buffer_start_ = new_start;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.
}
1889
// Emits a raw byte at the current pc. CheckBuffer() grows the buffer first
// if space is low.
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
1895
// Emits a raw 32-bit value at the current pc.
// NOTE(review): the store may be unaligned if pc_ is not 4-byte aligned —
// assumes the target tolerates unaligned accesses; confirm.
void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
1901
// Emits a raw 64-bit value at the current pc.
// NOTE(review): may be an unaligned store (see dd()).
void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}
1907
// Emits a raw pointer-sized value at the current pc.
// NOTE(review): may be an unaligned store (see dd()).
void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}
1913
RecordRelocInfo(RelocInfo::Mode rmode,intptr_t data)1914 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
1915 if (!ShouldRecordRelocInfo(rmode)) return;
1916 DeferredRelocInfo rinfo(pc_offset(), rmode, data);
1917 relocations_.push_back(rinfo);
1918 }
1919
EmitRelocations()1920 void Assembler::EmitRelocations() {
1921 EnsureSpaceFor(relocations_.size() * kMaxRelocSize);
1922
1923 for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
1924 it != relocations_.end(); it++) {
1925 RelocInfo::Mode rmode = it->rmode();
1926 Address pc = reinterpret_cast<Address>(buffer_start_) + it->position();
1927 RelocInfo rinfo(pc, rmode, it->data(), Code());
1928
1929 // Fix up internal references now that they are guaranteed to be bound.
1930 if (RelocInfo::IsInternalReference(rmode)) {
1931 // Jump table entry
1932 intptr_t pos = static_cast<intptr_t>(Memory<Address>(pc));
1933 Memory<Address>(pc) = reinterpret_cast<Address>(buffer_start_) + pos;
1934 } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
1935 // mov sequence
1936 intptr_t pos = static_cast<intptr_t>(target_address_at(pc, kNullAddress));
1937 set_target_address_at(pc, 0,
1938 reinterpret_cast<Address>(buffer_start_) + pos,
1939 SKIP_ICACHE_FLUSH);
1940 }
1941
1942 reloc_info_writer.Write(&rinfo);
1943 }
1944 }
1945
BlockTrampolinePoolFor(int instructions)1946 void Assembler::BlockTrampolinePoolFor(int instructions) {
1947 BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
1948 }
1949
// Emits the trampoline pool (a table of unconditional branches that tracked
// short-range branches can hop through) when it is safe to do so. Emission
// happens at most once per Assembler.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    // Re-check once the blocked region has been passed.
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    // One pool slot (instruction) per tracked branch.
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
    b(size + kInstrSize, LeaveLK);  // Skip over the pool in normal flow.
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);  // Each slot branches forward past the pool.
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
1980
// A PatchingAssembler assembles exactly |instructions| instructions directly
// into pre-existing code at |address|; kGap extra bytes provide the headroom
// the base Assembler expects (see the matching check in the destructor).
PatchingAssembler::PatchingAssembler(const AssemblerOptions& options,
                                     byte* address, int instructions)
    : Assembler(options, ExternalAssemblerBuffer(
                             address, instructions * kInstrSize + kGap)) {
  // No relocation info may have been produced yet.
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
1987
// Verifies the patch on destruction: pc_ must have advanced exactly to the
// end of the usable region (buffer size minus the kGap headroom) and no
// relocation info may have been emitted.
PatchingAssembler::~PatchingAssembler() {
  // Check that the code was patched as expected.
  DCHECK_EQ(pc_, buffer_start_ + buffer_->size() - kGap);
  DCHECK_EQ(reloc_info_writer.pos(), buffer_start_ + buffer_->size());
}
1993
// Snapshots the assembler's scratch-register list so the destructor can
// restore it, making scratch acquisition scope-local (RAII).
UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
    : assembler_(assembler),
      old_available_(*assembler->GetScratchRegisterList()) {}
1997
// Restores the scratch-register list captured by the constructor, releasing
// any registers acquired within this scope.
UseScratchRegisterScope::~UseScratchRegisterScope() {
  *assembler_->GetScratchRegisterList() = old_available_;
}
2001
Acquire()2002 Register UseScratchRegisterScope::Acquire() {
2003 RegList* available = assembler_->GetScratchRegisterList();
2004 DCHECK_NOT_NULL(available);
2005 DCHECK_NE(*available, 0);
2006 int index = static_cast<int>(base::bits::CountTrailingZeros32(*available));
2007 Register reg = Register::from_code(index);
2008 *available &= ~reg.bit();
2009 return reg;
2010 }
2011
2012 } // namespace internal
2013 } // namespace v8
2014
2015 #endif // V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
2016