1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
15 //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 #ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
28 #define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
29
30 #include <algorithm>
31 #include <limits>
32
33 #include "../code-generation-scopes-vixl.h"
34 #include "../globals-vixl.h"
35 #include "../macro-assembler-interface.h"
36
37 #include "assembler-aarch64.h"
38 #include "debugger-aarch64.h"
39 #include "instrument-aarch64.h"
40 // Required in order to generate debugging instructions for the simulator. This
41 // is needed regardless of whether the simulator is included or not, since
42 // generating simulator specific instructions is controlled at runtime.
43 #include "simulator-constants-aarch64.h"
44
45
// X-macro describing the single-register load/store macro-instructions:
//   V(macro name, register type, register parameter name, assembler op)
// The op expression may refer to the register parameter, e.g. to pick the
// 32- or 64-bit variant of a sign-extending load.
#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)
56
57
// X-macro describing the load/store pair macro-instructions:
//   V(macro name, register type, first and second register parameter names,
//     assembler op)
#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
62
63 namespace vixl {
64 namespace aarch64 {
65
66 // Forward declaration
67 class MacroAssembler;
68 class UseScratchRegisterScope;
69
70 class Pool {
71 public:
Pool(MacroAssembler * masm)72 explicit Pool(MacroAssembler* masm)
73 : checkpoint_(kNoCheckpointRequired), masm_(masm) {
74 Reset();
75 }
76
Reset()77 void Reset() {
78 checkpoint_ = kNoCheckpointRequired;
79 monitor_ = 0;
80 }
81
Block()82 void Block() { monitor_++; }
83 void Release();
IsBlocked()84 bool IsBlocked() const { return monitor_ != 0; }
85
86 static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;
87
88 void SetNextCheckpoint(ptrdiff_t checkpoint);
GetCheckpoint()89 ptrdiff_t GetCheckpoint() const { return checkpoint_; }
90 VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) {
91 return GetCheckpoint();
92 }
93
94 enum EmitOption { kBranchRequired, kNoBranchRequired };
95
96 protected:
97 // Next buffer offset at which a check is required for this pool.
98 ptrdiff_t checkpoint_;
99 // Indicates whether the emission of this pool is blocked.
100 int monitor_;
101 // The MacroAssembler using this pool.
102 MacroAssembler* masm_;
103 };
104
105
// Pool of pc-relative literals.
//
// Literals are emitted into the instruction stream and must stay within the
// pc-relative range of the instructions that use them, so the pool tracks the
// position of the first literal use in addition to the checkpoint inherited
// from Pool.
class LiteralPool : public Pool {
 public:
  explicit LiteralPool(MacroAssembler* masm);
  // Also deletes any literal registered with DeleteOnDestruction().
  ~LiteralPool();
  void Reset();

  // Register a literal for emission with this pool.
  void AddEntry(RawLiteral* literal);
  bool IsEmpty() const { return entries_.empty(); }
  // Size of the pool if it were emitted now, including the pool header.
  size_t GetSize() const;
  VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); }

  // Worst-case size of the pool, including the potential branch over it.
  size_t GetMaxSize() const;
  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }

  // Worst-case size of the other pools that could be emitted alongside this
  // one.
  size_t GetOtherPoolsMaxSize() const;
  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
    return GetOtherPoolsMaxSize();
  }

  // Check (and emit the pool if necessary) before generating `amount` more
  // bytes of code.
  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  // Check whether we need to emit the literal pool in order to be able to
  // safely emit a branch with a given range.
  void CheckEmitForBranch(size_t range);
  void Emit(EmitOption option = kNoBranchRequired);

  void SetNextRecommendedCheckpoint(ptrdiff_t offset);
  ptrdiff_t GetNextRecommendedCheckpoint();
  VIXL_DEPRECATED("GetNextRecommendedCheckpoint",
                  ptrdiff_t NextRecommendedCheckpoint()) {
    return GetNextRecommendedCheckpoint();
  }

  // Note a literal use at `use_position`; used to track the pool's
  // pc-relative range.
  void UpdateFirstUse(ptrdiff_t use_position);

  // Transfer ownership of `literal` to the pool: it is deleted when the pool
  // is destroyed.
  void DeleteOnDestruction(RawLiteral* literal) {
    deleted_on_destruction_.push_back(literal);
  }

  // Recommended not exact since the pool can be blocked for short periods.
  static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;

 private:
  // Literals waiting to be emitted.
  std::vector<RawLiteral*> entries_;
  // Total size in bytes of the entries, excluding the pool header
  // (see GetSize()).
  size_t size_;
  // Buffer offset of the earliest use of a pooled literal.
  ptrdiff_t first_use_;
  // The parent class `Pool` provides a `checkpoint_`, which is the buffer
  // offset before which a check *must* occur. This recommended checkpoint
  // indicates when we would like to start emitting the constant pool. The
  // MacroAssembler can, but does not have to, check the buffer when the
  // checkpoint is reached.
  ptrdiff_t recommended_checkpoint_;

  // Literals owned by the pool, deleted in the destructor.
  std::vector<RawLiteral*> deleted_on_destruction_;
};
160
161
GetSize()162 inline size_t LiteralPool::GetSize() const {
163 // Account for the pool header.
164 return size_ + kInstructionSize;
165 }
166
167
GetMaxSize()168 inline size_t LiteralPool::GetMaxSize() const {
169 // Account for the potential branch over the pool.
170 return GetSize() + kInstructionSize;
171 }
172
173
GetNextRecommendedCheckpoint()174 inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() {
175 return first_use_ + kRecommendedLiteralPoolRange;
176 }
177
178
// Pool of branch veneers.
//
// Conditional, compare-and-branch and test-and-branch instructions have a
// limited forward range. When such a branch to a not-yet-bound label risks
// going out of range, a "veneer" (an out-of-line unconditional branch) can be
// emitted for the original instruction to target instead. This pool tracks
// the unresolved forward branches and the offsets before which veneers must
// be emitted.
class VeneerPool : public Pool {
 public:
  explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}

  void Reset();

  // Blocking nests: each Block() call must be balanced by a Release().
  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }
  bool IsEmpty() const { return unresolved_branches_.IsEmpty(); }

  // Descriptor for a single unresolved (forward) branch.
  class BranchInfo {
   public:
    BranchInfo()
        : first_unreacheable_pc_(0),
          pc_offset_(0),
          label_(NULL),
          branch_type_(UnknownBranchType) {}
    BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
        : pc_offset_(offset), label_(label), branch_type_(branch_type) {
      first_unreacheable_pc_ =
          pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_);
    }

    static bool IsValidComparison(const BranchInfo& branch_1,
                                  const BranchInfo& branch_2) {
      // BranchInfo are always compared against other objects with the same
      // branch type.
      if (branch_1.branch_type_ != branch_2.branch_type_) {
        return false;
      }
      // Since we should never have two branch infos with the same offsets, it
      // first looks like we should check that offsets are different. However
      // the operators may also be used to *search* for a branch info in the
      // set.
      bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
      return (!same_offsets || ((branch_1.label_ == branch_2.label_) &&
                                (branch_1.first_unreacheable_pc_ ==
                                 branch_2.first_unreacheable_pc_)));
    }

    // We must provide comparison operators to work with InvalSet. Ordering is
    // by the offset of the branch in the code buffer.
    bool operator==(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ == other.pc_offset_;
    }
    bool operator<(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ < other.pc_offset_;
    }
    bool operator<=(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ <= other.pc_offset_;
    }
    bool operator>(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ > other.pc_offset_;
    }

    // First instruction position that is not reachable by the branch using a
    // positive branch offset. (The identifier spelling "unreacheable" is
    // kept as-is: it is referenced by name elsewhere.)
    ptrdiff_t first_unreacheable_pc_;
    // Offset of the branch in the code generation buffer.
    ptrdiff_t pc_offset_;
    // The label branched to.
    Label* label_;
    // Type of the branch instruction (conditional, compare or test).
    ImmBranchType branch_type_;
  };

  // Only branch types with a limited range use veneers; unconditional and
  // unknown branches are never tracked by this pool.
  bool BranchTypeUsesVeneers(ImmBranchType type) {
    return (type != UnknownBranchType) && (type != UncondBranchType);
  }

  // Start tracking a forward branch at `branch_pos` targeting the unbound
  // `label`.
  void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
                                Label* label,
                                ImmBranchType branch_type);
  // Stop tracking the branches that target `label`.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);

  // Whether a veneer is required before generating `amount` more bytes of
  // code, given the tightest first-unreachable position among the tracked
  // branches.
  bool ShouldEmitVeneer(int64_t first_unreacheable_pc, size_t amount);
  bool ShouldEmitVeneers(size_t amount) {
    return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount);
  }

  // Check (and emit the pool if necessary) before generating `amount` more
  // bytes of code.
  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  void Emit(EmitOption option, size_t margin);

  // The code size generated for a veneer. Currently one branch instruction.
  // This is for code size checking purposes, and can be extended in the
  // future for example if we decide to add nops between the veneers.
  static const int kVeneerCodeSize = 1 * kInstructionSize;
  // The maximum size of code other than veneers that can be generated when
  // emitting a veneer pool. Currently there can be an additional branch to
  // jump over the pool.
  static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;

  void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); }

  // Number of tracked branches, i.e. the worst-case number of veneers.
  int GetNumberOfPotentialVeneers() const {
    return static_cast<int>(unresolved_branches_.GetSize());
  }
  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
                  int NumberOfPotentialVeneers() const) {
    return GetNumberOfPotentialVeneers();
  }

  // Worst-case pool size: one veneer per tracked branch, plus the branch
  // over the pool.
  size_t GetMaxSize() const {
    return kPoolNonVeneerCodeSize +
           unresolved_branches_.GetSize() * kVeneerCodeSize;
  }
  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }

  // Worst-case size of the other pools that could be emitted alongside this
  // one.
  size_t GetOtherPoolsMaxSize() const;
  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
    return GetOtherPoolsMaxSize();
  }

  // InvalSet instantiation parameters (see the InvalSet documentation).
  static const int kNPreallocatedInfos = 4;
  static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
  static const size_t kReclaimFrom = 128;
  static const size_t kReclaimFactor = 16;

 private:
  typedef InvalSet<BranchInfo,
                   kNPreallocatedInfos,
                   ptrdiff_t,
                   kInvalidOffset,
                   kReclaimFrom,
                   kReclaimFactor>
      BranchInfoTypedSetBase;
  typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;

  // Set of BranchInfo of a single branch type.
  class BranchInfoTypedSet : public BranchInfoTypedSetBase {
   public:
    BranchInfoTypedSet() : BranchInfoTypedSetBase() {}

    // Smallest element key in the set, or kInvalidOffset if the set is empty.
    ptrdiff_t GetFirstLimit() {
      if (empty()) {
        return kInvalidOffset;
      }
      return GetMinElementKey();
    }
    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
      return GetFirstLimit();
    }
  };

  class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
   public:
    BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
    explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
        : BranchInfoTypedSetIterBase(typed_set) {}

    // TODO: Remove these and use the STL-like interface instead.
    using BranchInfoTypedSetIterBase::Advance;
    using BranchInfoTypedSetIterBase::Current;
  };

  // Aggregate of one BranchInfoTypedSet per tracked branch type.
  class BranchInfoSet {
   public:
    void insert(BranchInfo branch_info) {
      ImmBranchType type = branch_info.branch_type_;
      VIXL_ASSERT(IsValidBranchType(type));
      typed_set_[BranchIndexFromType(type)].insert(branch_info);
    }

    // Erasing an untracked branch type is a silent no-op.
    void erase(BranchInfo branch_info) {
      if (IsValidBranchType(branch_info.branch_type_)) {
        int index =
            BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
        typed_set_[index].erase(branch_info);
      }
    }

    // Total number of branches tracked, across all branch types.
    size_t GetSize() const {
      size_t res = 0;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res += typed_set_[i].size();
      }
      return res;
    }
    VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); }

    bool IsEmpty() const {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        if (!typed_set_[i].empty()) {
          return false;
        }
      }
      return true;
    }
    VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); }

    // Smallest limit over all branch types (kInvalidOffset if all sets are
    // empty).
    ptrdiff_t GetFirstLimit() {
      ptrdiff_t res = kInvalidOffset;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res = std::min(res, typed_set_[i].GetFirstLimit());
      }
      return res;
    }
    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
      return GetFirstLimit();
    }

    void Reset() {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        typed_set_[i].clear();
      }
    }

    // The two functions below map branch types to indices in typed_set_ and
    // back; they must remain inverses of each other.
    static ImmBranchType BranchTypeFromIndex(int index) {
      switch (index) {
        case 0:
          return CondBranchType;
        case 1:
          return CompareBranchType;
        case 2:
          return TestBranchType;
        default:
          VIXL_UNREACHABLE();
          return UnknownBranchType;
      }
    }
    static int BranchIndexFromType(ImmBranchType branch_type) {
      switch (branch_type) {
        case CondBranchType:
          return 0;
        case CompareBranchType:
          return 1;
        case TestBranchType:
          return 2;
        default:
          VIXL_UNREACHABLE();
          return 0;
      }
    }

    bool IsValidBranchType(ImmBranchType branch_type) {
      return (branch_type != UnknownBranchType) &&
             (branch_type != UncondBranchType);
    }

   private:
    static const int kNumberOfTrackedBranchTypes = 3;
    BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];

    friend class VeneerPool;
    friend class BranchInfoSetIterator;
  };

  // Iterator over every BranchInfo in a BranchInfoSet, visiting the typed
  // sub-sets in index order.
  class BranchInfoSetIterator {
   public:
    explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        new (&sub_iterator_[i])
            BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
      }
    }

    VeneerPool::BranchInfo* Current() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          return sub_iterator_[i].Current();
        }
      }
      VIXL_UNREACHABLE();
      return NULL;
    }

    void Advance() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Advance();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    bool Done() const {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) return false;
      }
      return true;
    }

    // Skip the remaining entries of the current branch type.
    void AdvanceToNextType() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Finish();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    void DeleteCurrentAndAdvance() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].DeleteCurrentAndAdvance();
          return;
        }
      }
    }

   private:
    BranchInfoSet* set_;
    BranchInfoTypedSetIterator
        sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
  };

  // Buffer offset of the next mandatory check: the first position at which
  // some tracked branch could become unable to reach its target.
  ptrdiff_t GetNextCheckPoint() {
    if (unresolved_branches_.IsEmpty()) {
      return kNoCheckpointRequired;
    } else {
      return unresolved_branches_.GetFirstLimit();
    }
  }
  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
    return GetNextCheckPoint();
  }

  // Information about unresolved (forward) branches.
  BranchInfoSet unresolved_branches_;
};
505
506
// Helper for common emission checks.
// The macro-instruction maps to a single instruction.
class SingleEmissionCheckScope : public EmissionCheckScope {
 public:
  explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm)
      : EmissionCheckScope(masm, kInstructionSize) {}
};
514
515
// The macro instruction is a "typical" macro-instruction. Typical
// macro-instructions only emit a few instructions, a few being defined as
// 8 here.
class MacroEmissionCheckScope : public EmissionCheckScope {
 public:
  explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm)
      : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}

 private:
  // Upper bound on the code size a typical macro-instruction may emit.
  static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
};
526
527
// Generalised branch types: the architectural conditions, plus
// register-compare (cbz/cbnz) and bit-test (tbz/tbnz) branches.
enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear
};
568
569
// Controls whether Mov may discard a move into the same W register — see
// Mov(const Register&, const Operand&, DiscardMoveMode).
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
571
// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted
// for an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
586
587
588 class MacroAssembler : public Assembler, public MacroAssemblerInterface {
589 public:
590 explicit MacroAssembler(
591 PositionIndependentCodeOption pic = PositionIndependentCode);
592 MacroAssembler(size_t capacity,
593 PositionIndependentCodeOption pic = PositionIndependentCode);
594 MacroAssembler(byte* buffer,
595 size_t capacity,
596 PositionIndependentCodeOption pic = PositionIndependentCode);
597 ~MacroAssembler();
598
599 enum FinalizeOption {
600 kFallThrough, // There may be more code to execute after calling Finalize.
601 kUnreachable // Anything generated after calling Finalize is unreachable.
602 };
603
AsAssemblerBase()604 virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
605 return this;
606 }
607
608 // TODO(pools): implement these functions.
EmitPoolHeader()609 virtual void EmitPoolHeader() VIXL_OVERRIDE {}
EmitPoolFooter()610 virtual void EmitPoolFooter() VIXL_OVERRIDE {}
EmitPaddingBytes(int n)611 virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { USE(n); }
EmitNopBytes(int n)612 virtual void EmitNopBytes(int n) VIXL_OVERRIDE { USE(n); }
613
614 // Start generating code from the beginning of the buffer, discarding any code
615 // and data that has already been emitted into the buffer.
616 //
617 // In order to avoid any accidental transfer of state, Reset ASSERTs that the
618 // constant pool is not blocked.
619 void Reset();
620
621 // Finalize a code buffer of generated instructions. This function must be
622 // called before executing or copying code from the buffer. By default,
623 // anything generated after this should not be reachable (the last instruction
624 // generated is an unconditional branch). If you need to generate more code,
625 // then set `option` to kFallThrough.
626 void FinalizeCode(FinalizeOption option = kUnreachable);
627
628
629 // Constant generation helpers.
630 // These functions return the number of instructions required to move the
631 // immediate into the destination register. Also, if the masm pointer is
632 // non-null, it generates the code to do so.
633 // The two features are implemented using one function to avoid duplication of
634 // the logic.
635 // The function can be used to evaluate the cost of synthesizing an
636 // instruction using 'mov immediate' instructions. A user might prefer loading
637 // a constant using the literal pool instead of using multiple 'mov immediate'
638 // instructions.
639 static int MoveImmediateHelper(MacroAssembler* masm,
640 const Register& rd,
641 uint64_t imm);
642 static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
643 const Register& dst,
644 int64_t imm);
645
646
647 // Logical macros.
648 void And(const Register& rd, const Register& rn, const Operand& operand);
649 void Ands(const Register& rd, const Register& rn, const Operand& operand);
650 void Bic(const Register& rd, const Register& rn, const Operand& operand);
651 void Bics(const Register& rd, const Register& rn, const Operand& operand);
652 void Orr(const Register& rd, const Register& rn, const Operand& operand);
653 void Orn(const Register& rd, const Register& rn, const Operand& operand);
654 void Eor(const Register& rd, const Register& rn, const Operand& operand);
655 void Eon(const Register& rd, const Register& rn, const Operand& operand);
656 void Tst(const Register& rn, const Operand& operand);
657 void LogicalMacro(const Register& rd,
658 const Register& rn,
659 const Operand& operand,
660 LogicalOp op);
661
662 // Add and sub macros.
663 void Add(const Register& rd,
664 const Register& rn,
665 const Operand& operand,
666 FlagsUpdate S = LeaveFlags);
667 void Adds(const Register& rd, const Register& rn, const Operand& operand);
668 void Sub(const Register& rd,
669 const Register& rn,
670 const Operand& operand,
671 FlagsUpdate S = LeaveFlags);
672 void Subs(const Register& rd, const Register& rn, const Operand& operand);
673 void Cmn(const Register& rn, const Operand& operand);
674 void Cmp(const Register& rn, const Operand& operand);
675 void Neg(const Register& rd, const Operand& operand);
676 void Negs(const Register& rd, const Operand& operand);
677
678 void AddSubMacro(const Register& rd,
679 const Register& rn,
680 const Operand& operand,
681 FlagsUpdate S,
682 AddSubOp op);
683
684 // Add/sub with carry macros.
685 void Adc(const Register& rd, const Register& rn, const Operand& operand);
686 void Adcs(const Register& rd, const Register& rn, const Operand& operand);
687 void Sbc(const Register& rd, const Register& rn, const Operand& operand);
688 void Sbcs(const Register& rd, const Register& rn, const Operand& operand);
689 void Ngc(const Register& rd, const Operand& operand);
690 void Ngcs(const Register& rd, const Operand& operand);
691 void AddSubWithCarryMacro(const Register& rd,
692 const Register& rn,
693 const Operand& operand,
694 FlagsUpdate S,
695 AddSubWithCarryOp op);
696
697 // Move macros.
698 void Mov(const Register& rd, uint64_t imm);
699 void Mov(const Register& rd,
700 const Operand& operand,
701 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
Mvn(const Register & rd,uint64_t imm)702 void Mvn(const Register& rd, uint64_t imm) {
703 Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask));
704 }
705 void Mvn(const Register& rd, const Operand& operand);
706
707 // Try to move an immediate into the destination register in a single
708 // instruction. Returns true for success, and updates the contents of dst.
709 // Returns false, otherwise.
710 bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
711
712 // Move an immediate into register dst, and return an Operand object for
713 // use with a subsequent instruction that accepts a shift. The value moved
714 // into dst is not necessarily equal to imm; it may have had a shifting
715 // operation applied to it that will be subsequently undone by the shift
716 // applied in the Operand.
717 Operand MoveImmediateForShiftedOp(const Register& dst,
718 int64_t imm,
719 PreShiftImmMode mode);
720
721 void Move(const GenericOperand& dst, const GenericOperand& src);
722
723 // Synthesises the address represented by a MemOperand into a register.
724 void ComputeAddress(const Register& dst, const MemOperand& mem_op);
725
726 // Conditional macros.
727 void Ccmp(const Register& rn,
728 const Operand& operand,
729 StatusFlags nzcv,
730 Condition cond);
731 void Ccmn(const Register& rn,
732 const Operand& operand,
733 StatusFlags nzcv,
734 Condition cond);
735 void ConditionalCompareMacro(const Register& rn,
736 const Operand& operand,
737 StatusFlags nzcv,
738 Condition cond,
739 ConditionalCompareOp op);
740
741 // On return, the boolean values pointed to will indicate whether `left` and
742 // `right` should be synthesised in a temporary register.
GetCselSynthesisInformation(const Register & rd,const Operand & left,const Operand & right,bool * should_synthesise_left,bool * should_synthesise_right)743 static void GetCselSynthesisInformation(const Register& rd,
744 const Operand& left,
745 const Operand& right,
746 bool* should_synthesise_left,
747 bool* should_synthesise_right) {
748 // Note that the helper does not need to look at the condition.
749 CselHelper(NULL,
750 rd,
751 left,
752 right,
753 eq,
754 should_synthesise_left,
755 should_synthesise_right);
756 }
757
Csel(const Register & rd,const Operand & left,const Operand & right,Condition cond)758 void Csel(const Register& rd,
759 const Operand& left,
760 const Operand& right,
761 Condition cond) {
762 CselHelper(this, rd, left, right, cond);
763 }
764
765 // Load/store macros.
766 #define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
767 void FN(const REGTYPE REG, const MemOperand& addr);
768 LS_MACRO_LIST(DECLARE_FUNCTION)
769 #undef DECLARE_FUNCTION
770
771 void LoadStoreMacro(const CPURegister& rt,
772 const MemOperand& addr,
773 LoadStoreOp op);
774
775 #define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
776 void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
777 LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
778 #undef DECLARE_FUNCTION
779
780 void LoadStorePairMacro(const CPURegister& rt,
781 const CPURegister& rt2,
782 const MemOperand& addr,
783 LoadStorePairOp op);
784
785 void Prfm(PrefetchOperation op, const MemOperand& addr);
786
787 // Push or pop up to 4 registers of the same width to or from the stack,
788 // using the current stack pointer as set by SetStackPointer.
789 //
790 // If an argument register is 'NoReg', all further arguments are also assumed
791 // to be 'NoReg', and are thus not pushed or popped.
792 //
793 // Arguments are ordered such that "Push(a, b);" is functionally equivalent
794 // to "Push(a); Push(b);".
795 //
796 // It is valid to push the same register more than once, and there is no
797 // restriction on the order in which registers are specified.
798 //
799 // It is not valid to pop into the same register more than once in one
800 // operation, not even into the zero register.
801 //
802 // If the current stack pointer (as set by SetStackPointer) is sp, then it
803 // must be aligned to 16 bytes on entry and the total size of the specified
804 // registers must also be a multiple of 16 bytes.
805 //
806 // Even if the current stack pointer is not the system stack pointer (sp),
807 // Push (and derived methods) will still modify the system stack pointer in
808 // order to comply with ABI rules about accessing memory below the system
809 // stack pointer.
810 //
811 // Other than the registers passed into Pop, the stack pointer and (possibly)
812 // the system stack pointer, these methods do not modify any other registers.
813 void Push(const CPURegister& src0,
814 const CPURegister& src1 = NoReg,
815 const CPURegister& src2 = NoReg,
816 const CPURegister& src3 = NoReg);
817 void Pop(const CPURegister& dst0,
818 const CPURegister& dst1 = NoReg,
819 const CPURegister& dst2 = NoReg,
820 const CPURegister& dst3 = NoReg);
821
  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  // Build a CPURegList from the (registers, reg_size, type) triple and
  // forward to PushCPURegList / PopCPURegList.
  void PushSizeRegList(
      RegList registers,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  void PopSizeRegList(RegList registers,
                      unsigned reg_size,
                      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  // Convenience wrappers that fix the register size (X/W/D/S). The D and S
  // forms select the vector register bank (CPURegister::kVRegister).
  void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); }
  void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); }
  void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); }
  void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); }
  void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
  void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
862
  // Push the specified register 'count' times.
  void PushMultipleTimes(int count, Register src);

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Poke(const Register& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Peek(const Register& dst, const Operand& offset);
877
  // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses.
  //
  // (Peek|Poke)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
  // Load/store the list at 'offset' bytes from the current stack pointer,
  // without adjusting the stack pointer itself.
  void PeekCPURegList(CPURegList registers, int64_t offset) {
    LoadCPURegList(registers, MemOperand(StackPointer(), offset));
  }
  void PokeCPURegList(CPURegList registers, int64_t offset) {
    StoreCPURegList(registers, MemOperand(StackPointer(), offset));
  }

  // Build a CPURegList from the (registers, reg_size, type) triple and
  // forward to PeekCPURegList / PokeCPURegList.
  void PeekSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PeekCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PokeSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PokeCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  // Convenience wrappers that fix the register size (X/W/D/S). The D and S
  // forms select the vector register bank (CPURegister::kVRegister).
  void PeekXRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kXRegSize);
  }
  void PokeXRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kXRegSize);
  }
  void PeekWRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kWRegSize);
  }
  void PokeWRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kWRegSize);
  }
  void PeekDRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PokeDRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PeekSRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
  void PokeSRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
932

  // Claim or drop stack space without actually accessing memory.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  void Claim(const Operand& size);
  void Drop(const Operand& size);

  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PopCalleeSavedRegisters();

  // Load/store every register in 'registers' from/to the given base address.
  void LoadCPURegList(CPURegList registers, const MemOperand& src);
  void StoreCPURegList(CPURegList registers, const MemOperand& dst);
966
  // Remaining instructions are simple pass-through calls to the assembler.
  // Each wrapper checks that macro instructions are allowed, asserts basic
  // operand validity, then emits exactly one instruction inside a
  // SingleEmissionCheckScope.
  void Adr(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adr(rd, label);
  }
  void Adrp(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adrp(rd, label);
  }
  // Arithmetic shift right by an immediate.
  void Asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    asr(rd, rn, shift);
  }
  // Arithmetic shift right by a register amount (emits asrv).
  void Asr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    asrv(rd, rn, rm);
  }
995
996 // Branch type inversion relies on these relations.
997 VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
998 (reg_bit_clear == (reg_bit_set ^ 1)) &&
999 (always == (never ^ 1)));
1000
InvertBranchType(BranchType type)1001 BranchType InvertBranchType(BranchType type) {
1002 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
1003 return static_cast<BranchType>(
1004 InvertCondition(static_cast<Condition>(type)));
1005 } else {
1006 return static_cast<BranchType>(type ^ 1);
1007 }
1008 }
1009
  // Generic branch: 'type' selects unconditional/conditional/compare-and-
  // branch/test-bit forms; 'reg' and 'bit' are used by the latter two.
  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  void B(Label* label);
  void B(Label* label, Condition cond);
  // Argument-order convenience overload.
  void B(Condition cond, Label* label) { B(label, cond); }
  // Bitfield move.
  void Bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfm(rd, rn, immr, imms);
  }
  // Bitfield insert.
  void Bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfi(rd, rn, lsb, width);
  }
  // Bitfield extract and insert low.
  void Bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfxil(rd, rn, lsb, width);
  }
  void Bind(Label* label);
  // Bind a label to a specified offset from the start of the buffer.
  void BindToOffset(Label* label, ptrdiff_t offset);
  // Branch with link to a label.
  void Bl(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bl(label);
  }
  // Branch with link to the address in xn.
  void Blr(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    blr(xn);
  }
  // Branch to the address in xn.
  void Br(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    br(xn);
  }
  // Breakpoint with an immediate comment/code field.
  void Brk(int code = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brk(code);
  }
  // Compare and branch on non-zero / zero.
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  // Conditional increment: rd = cond ? rn + 1 : rn.
  void Cinc(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinc(rd, rn, cond);
  }
  // Conditional invert: rd = cond ? ~rn : rn.
  void Cinv(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinv(rd, rn, cond);
  }
  // Clear the local monitor for exclusive accesses.
  void Clrex() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    clrex();
  }
  // Count leading sign bits.
  void Cls(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cls(rd, rn);
  }
  // Count leading zero bits.
  void Clz(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    clz(rd, rn);
  }
  // Conditional negate: rd = cond ? -rn : rn.
  void Cneg(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cneg(rd, rn, cond);
  }
  // Conditional set: rd = cond ? 1 : 0.
  void Cset(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    cset(rd, cond);
  }
  // Conditional set mask: rd = cond ? -1 : 0.
  void Csetm(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    csetm(rd, cond);
  }
  // Conditional select increment: rd = cond ? rn : rm + 1.
  // 'al' and 'nv' are not allowed for conditional selects.
  void Csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinc(rd, rn, rm, cond);
  }
  // Conditional select invert: rd = cond ? rn : ~rm.
  void Csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinv(rd, rn, rm, cond);
  }
  // Conditional select negate: rd = cond ? rn : -rm.
  void Csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csneg(rd, rn, rm, cond);
  }
  // Data memory barrier.
  void Dmb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dmb(domain, type);
  }
  // Data synchronization barrier.
  void Dsb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dsb(domain, type);
  }
  // Extract a register from a pair: rd = (rn:rm) >> lsb.
  void Extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    extr(rd, rn, rm, lsb);
  }
  // Floating-point add.
  void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fadd(vd, vn, vm);
  }
  // Floating-point conditional compare; 'trap' selects the signalling
  // (fccmpe) or non-signalling (fccmp) form via FPCCompareMacro.
  void Fccmp(const VRegister& vn,
             const VRegister& vm,
             StatusFlags nzcv,
             Condition cond,
             FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    FPCCompareMacro(vn, vm, nzcv, cond, trap);
  }
  // Signalling variant of Fccmp (traps enabled).
  void Fccmpe(const VRegister& vn,
              const VRegister& vm,
              StatusFlags nzcv,
              Condition cond) {
    Fccmp(vn, vm, nzcv, cond, EnableTrap);
  }
  // Floating-point compare.
  void Fcmp(const VRegister& vn,
            const VRegister& vm,
            FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    FPCompareMacro(vn, vm, trap);
  }
  void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap);
  void Fcmpe(const VRegister& vn, double value);
  // Signalling variant of Fcmp (traps enabled).
  void Fcmpe(const VRegister& vn, const VRegister& vm) {
    Fcmp(vn, vm, EnableTrap);
  }
  // Floating-point conditional select: vd = cond ? vn : vm.
  void Fcsel(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    fcsel(vd, vn, vm, cond);
  }
  // Floating-point precision conversion.
  void Fcvt(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvt(vd, vn);
  }
  // FP convert-long (lengthen); the '2' forms operate on the upper half.
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl2(vd, vn);
  }
  // FP convert-narrow; the '2' forms write the upper half.
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn2(vd, vn);
  }
  // FP convert-narrow, rounding to odd.
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn2(vd, vn);
  }
  // FP-to-integer conversions. Suffix encodes rounding mode and signedness:
  // a = to nearest (ties away), m = toward minus infinity, n = to nearest
  // (ties even), p = toward plus infinity, z = toward zero; s/u = signed/
  // unsigned result.
  void Fcvtas(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtas(rd, vn);
  }
  void Fcvtau(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtau(rd, vn);
  }
  void Fcvtms(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtms(rd, vn);
  }
  void Fcvtmu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtmu(rd, vn);
  }
  void Fcvtns(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtns(rd, vn);
  }
  void Fcvtnu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtnu(rd, vn);
  }
  void Fcvtps(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtps(rd, vn);
  }
  void Fcvtpu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtpu(rd, vn);
  }
  // 'fbits' selects a fixed-point result with that many fraction bits.
  void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzs(rd, vn, fbits);
  }
  void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzu(rd, vn, fbits);
  }
  // Floating-point divide.
  void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fdiv(vd, vn, vm);
  }
  // Floating-point maximum / minimum; the 'nm' forms use the IEEE 754-2008
  // number-preferring (maxNum/minNum) semantics.
  void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmax(vd, vn, vm);
  }
  void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmaxnm(vd, vn, vm);
  }
  void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmin(vd, vn, vm);
  }
  void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fminnm(vd, vn, vm);
  }
  // Register-to-register FP move, eliding the instruction when it would be a
  // true no-op.
  void Fmov(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    // Only emit an instruction if vd and vn are different, and they are both D
    // registers. fmov(s0, s0) is not a no-op because it clears the top word of
    // d0. Technically, fmov(d0, d0) is not a no-op either because it clears
    // the top of q0, but VRegister does not currently support Q registers.
    if (!vd.Is(vn) || !vd.Is64Bits()) {
      fmov(vd, vn);
    }
  }
  // General-purpose register to FP register move.
  void Fmov(const VRegister& vd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(vd, rn);
  }
  // Overloads taking the concrete X/W register types; both forward to the
  // generic Register overload above.
  void Fmov(const VRegister& vd, const XRegister& xn) {
    Fmov(vd, Register(xn));
  }
  void Fmov(const VRegister& vd, const WRegister& wn) {
    Fmov(vd, Register(wn));
  }
  // Move into / out of a vector element at 'index'.
  void Fmov(const VRegister& vd, int index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmov(vd, index, rn);
  }
  void Fmov(const Register& rd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn, index);
  }

  // Provide explicit double and float interfaces for FP immediate moves, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of vd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Fmov(VRegister vd, double imm);
  void Fmov(VRegister vd, float imm);
  // Provide a template to allow other types to be converted automatically.
  template <typename T>
  void Fmov(VRegister vd, T imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    Fmov(vd, static_cast<double>(imm));
  }
  // FP register to general-purpose register move.
  void Fmov(Register rd, VRegister vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(rd, vn);
  }
  // Floating-point multiply.
  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmul(vd, vn, vm);
  }
  // Floating-point multiply-negate.
  void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmul(vd, vn, vm);
  }
  // Fused multiply-add: vd = va + vn * vm.
  void Fmadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmadd(vd, vn, vm, va);
  }
  // Fused multiply-subtract: vd = va - vn * vm.
  void Fmsub(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmsub(vd, vn, vm, va);
  }
  // Negated fused multiply-add / multiply-subtract.
  void Fnmadd(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmadd(vd, vn, vm, va);
  }
  void Fnmsub(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmsub(vd, vn, vm, va);
  }
  // Floating-point subtract.
  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fsub(vd, vn, vm);
  }
  // System hint (nop, yield, wfe, etc.).
  void Hint(SystemHint code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hint(code);
  }
  // Halting breakpoint with an immediate code field.
  void Hlt(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hlt(code);
  }
  // Instruction synchronization barrier.
  void Isb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    isb();
  }
  // Load-acquire (word/doubleword, byte, halfword).
  void Ldar(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldar(rt, src);
  }
  void Ldarb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarb(rt, src);
  }
  void Ldarh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarh(rt, src);
  }
  // Load-acquire exclusive pair; rt and rt2 must be distinct.
  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldaxp(rt, rt2, src);
  }
  // Load-acquire exclusive (word/doubleword, byte, halfword).
  void Ldaxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxr(rt, src);
  }
  void Ldaxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrb(rt, src);
  }
  void Ldaxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrh(rt, src);
  }
  // Load pair with non-temporal hint.
  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldnp(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  void Ldr(const VRegister& vt, double imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    // The literal is heap-allocated but owned by the literal pool: the
    // kDeletedOnPlacementByPool policy deletes it when the pool places it,
    // so there is intentionally no matching delete here.
    RawLiteral* literal;
    if (vt.IsD()) {
      literal = new Literal<double>(imm,
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    } else {
      // Narrow to float when the destination is not a D register.
      literal = new Literal<float>(static_cast<float>(imm),
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  void Ldr(const VRegister& vt, float imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    // Pool-owned literal; see the double overload above for the ownership
    // rationale.
    RawLiteral* literal;
    if (vt.IsS()) {
      literal = new Literal<float>(imm,
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    } else {
      // Widen to double when the destination is not an S register.
      literal = new Literal<double>(static_cast<double>(imm),
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(vt, literal);
  }
  // Load a 128-bit literal (two 64-bit halves) into a Q register.
  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(vt.IsQ());
    SingleEmissionCheckScope guard(this);
    ldr(vt,
        new Literal<uint64_t>(high64,
                              low64,
                              &literal_pool_,
                              RawLiteral::kDeletedOnPlacementByPool));
  }
  // Load an integer literal sized to match rt (64- or 32-bit).
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
    if (rt.Is64Bits()) {
      literal = new Literal<uint64_t>(imm,
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    } else {
      VIXL_ASSERT(rt.Is32Bits());
      // A 32-bit load must not silently truncate the immediate.
      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    }
    ldr(rt, literal);
  }
  // Load a 32-bit literal and sign-extend it into rt.
  void Ldrsw(const Register& rt, uint32_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    ldrsw(rt,
          new Literal<uint32_t>(imm,
                                &literal_pool_,
                                RawLiteral::kDeletedOnPlacementByPool));
  }
  // Load from an existing (caller-managed) literal.
  void Ldr(const CPURegister& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldr(rt, literal);
  }
  void Ldrsw(const Register& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldrsw(rt, literal);
  }
  // Load exclusive pair; rt and rt2 must be distinct.
  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldxp(rt, rt2, src);
  }
  // Load exclusive (word/doubleword, byte, halfword).
  void Ldxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxr(rt, src);
  }
  void Ldxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrb(rt, src);
  }
  void Ldxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrh(rt, src);
  }
  // Logical shift left by an immediate.
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsl(rd, rn, shift);
  }
  // Logical shift left by a register amount (emits lslv).
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lslv(rd, rn, rm);
  }
  // Logical shift right by an immediate.
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsr(rd, rn, shift);
  }
  // Logical shift right by a register amount (emits lsrv).
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lsrv(rd, rn, rm);
  }
Madd(const Register & rd,const Register & rn,const Register & rm,const Register & ra)1632 void Madd(const Register& rd,
1633 const Register& rn,
1634 const Register& rm,
1635 const Register& ra) {
1636 VIXL_ASSERT(allow_macro_instructions_);
1637 VIXL_ASSERT(!rd.IsZero());
1638 VIXL_ASSERT(!rn.IsZero());
1639 VIXL_ASSERT(!rm.IsZero());
1640 VIXL_ASSERT(!ra.IsZero());
1641 SingleEmissionCheckScope guard(this);
1642 madd(rd, rn, rm, ra);
1643 }
Mneg(const Register & rd,const Register & rn,const Register & rm)1644 void Mneg(const Register& rd, const Register& rn, const Register& rm) {
1645 VIXL_ASSERT(allow_macro_instructions_);
1646 VIXL_ASSERT(!rd.IsZero());
1647 VIXL_ASSERT(!rn.IsZero());
1648 VIXL_ASSERT(!rm.IsZero());
1649 SingleEmissionCheckScope guard(this);
1650 mneg(rd, rn, rm);
1651 }
1652 void Mov(const Register& rd,
1653 const Register& rn,
1654 DiscardMoveMode discard_mode = kDontDiscardForSameWReg) {
1655 VIXL_ASSERT(allow_macro_instructions_);
1656 // Emit a register move only if the registers are distinct, or if they are
1657 // not X registers.
1658 //
1659 // Note that mov(w0, w0) is not a no-op because it clears the top word of
1660 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
1661 // registers is not required to clear the top word of the X register. In
1662 // this case, the instruction is discarded.
1663 //
1664 // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
1665 if (!rd.Is(rn) ||
1666 (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
1667 SingleEmissionCheckScope guard(this);
1668 mov(rd, rn);
1669 }
1670 }
1671 void Movk(const Register& rd, uint64_t imm, int shift = -1) {
1672 VIXL_ASSERT(allow_macro_instructions_);
1673 VIXL_ASSERT(!rd.IsZero());
1674 SingleEmissionCheckScope guard(this);
1675 movk(rd, imm, shift);
1676 }
Mrs(const Register & rt,SystemRegister sysreg)1677 void Mrs(const Register& rt, SystemRegister sysreg) {
1678 VIXL_ASSERT(allow_macro_instructions_);
1679 VIXL_ASSERT(!rt.IsZero());
1680 SingleEmissionCheckScope guard(this);
1681 mrs(rt, sysreg);
1682 }
Msr(SystemRegister sysreg,const Register & rt)1683 void Msr(SystemRegister sysreg, const Register& rt) {
1684 VIXL_ASSERT(allow_macro_instructions_);
1685 VIXL_ASSERT(!rt.IsZero());
1686 SingleEmissionCheckScope guard(this);
1687 msr(sysreg, rt);
1688 }
1689 void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
1690 VIXL_ASSERT(allow_macro_instructions_);
1691 SingleEmissionCheckScope guard(this);
1692 sys(op1, crn, crm, op2, rt);
1693 }
Dc(DataCacheOp op,const Register & rt)1694 void Dc(DataCacheOp op, const Register& rt) {
1695 VIXL_ASSERT(allow_macro_instructions_);
1696 SingleEmissionCheckScope guard(this);
1697 dc(op, rt);
1698 }
Ic(InstructionCacheOp op,const Register & rt)1699 void Ic(InstructionCacheOp op, const Register& rt) {
1700 VIXL_ASSERT(allow_macro_instructions_);
1701 SingleEmissionCheckScope guard(this);
1702 ic(op, rt);
1703 }
Msub(const Register & rd,const Register & rn,const Register & rm,const Register & ra)1704 void Msub(const Register& rd,
1705 const Register& rn,
1706 const Register& rm,
1707 const Register& ra) {
1708 VIXL_ASSERT(allow_macro_instructions_);
1709 VIXL_ASSERT(!rd.IsZero());
1710 VIXL_ASSERT(!rn.IsZero());
1711 VIXL_ASSERT(!rm.IsZero());
1712 VIXL_ASSERT(!ra.IsZero());
1713 SingleEmissionCheckScope guard(this);
1714 msub(rd, rn, rm, ra);
1715 }
Mul(const Register & rd,const Register & rn,const Register & rm)1716 void Mul(const Register& rd, const Register& rn, const Register& rm) {
1717 VIXL_ASSERT(allow_macro_instructions_);
1718 VIXL_ASSERT(!rd.IsZero());
1719 VIXL_ASSERT(!rn.IsZero());
1720 VIXL_ASSERT(!rm.IsZero());
1721 SingleEmissionCheckScope guard(this);
1722 mul(rd, rn, rm);
1723 }
Nop()1724 void Nop() {
1725 VIXL_ASSERT(allow_macro_instructions_);
1726 SingleEmissionCheckScope guard(this);
1727 nop();
1728 }
Rbit(const Register & rd,const Register & rn)1729 void Rbit(const Register& rd, const Register& rn) {
1730 VIXL_ASSERT(allow_macro_instructions_);
1731 VIXL_ASSERT(!rd.IsZero());
1732 VIXL_ASSERT(!rn.IsZero());
1733 SingleEmissionCheckScope guard(this);
1734 rbit(rd, rn);
1735 }
1736 void Ret(const Register& xn = lr) {
1737 VIXL_ASSERT(allow_macro_instructions_);
1738 VIXL_ASSERT(!xn.IsZero());
1739 SingleEmissionCheckScope guard(this);
1740 ret(xn);
1741 }
Rev(const Register & rd,const Register & rn)1742 void Rev(const Register& rd, const Register& rn) {
1743 VIXL_ASSERT(allow_macro_instructions_);
1744 VIXL_ASSERT(!rd.IsZero());
1745 VIXL_ASSERT(!rn.IsZero());
1746 SingleEmissionCheckScope guard(this);
1747 rev(rd, rn);
1748 }
Rev16(const Register & rd,const Register & rn)1749 void Rev16(const Register& rd, const Register& rn) {
1750 VIXL_ASSERT(allow_macro_instructions_);
1751 VIXL_ASSERT(!rd.IsZero());
1752 VIXL_ASSERT(!rn.IsZero());
1753 SingleEmissionCheckScope guard(this);
1754 rev16(rd, rn);
1755 }
Rev32(const Register & rd,const Register & rn)1756 void Rev32(const Register& rd, const Register& rn) {
1757 VIXL_ASSERT(allow_macro_instructions_);
1758 VIXL_ASSERT(!rd.IsZero());
1759 VIXL_ASSERT(!rn.IsZero());
1760 SingleEmissionCheckScope guard(this);
1761 rev32(rd, rn);
1762 }
Ror(const Register & rd,const Register & rs,unsigned shift)1763 void Ror(const Register& rd, const Register& rs, unsigned shift) {
1764 VIXL_ASSERT(allow_macro_instructions_);
1765 VIXL_ASSERT(!rd.IsZero());
1766 VIXL_ASSERT(!rs.IsZero());
1767 SingleEmissionCheckScope guard(this);
1768 ror(rd, rs, shift);
1769 }
Ror(const Register & rd,const Register & rn,const Register & rm)1770 void Ror(const Register& rd, const Register& rn, const Register& rm) {
1771 VIXL_ASSERT(allow_macro_instructions_);
1772 VIXL_ASSERT(!rd.IsZero());
1773 VIXL_ASSERT(!rn.IsZero());
1774 VIXL_ASSERT(!rm.IsZero());
1775 SingleEmissionCheckScope guard(this);
1776 rorv(rd, rn, rm);
1777 }
Sbfiz(const Register & rd,const Register & rn,unsigned lsb,unsigned width)1778 void Sbfiz(const Register& rd,
1779 const Register& rn,
1780 unsigned lsb,
1781 unsigned width) {
1782 VIXL_ASSERT(allow_macro_instructions_);
1783 VIXL_ASSERT(!rd.IsZero());
1784 VIXL_ASSERT(!rn.IsZero());
1785 SingleEmissionCheckScope guard(this);
1786 sbfiz(rd, rn, lsb, width);
1787 }
Sbfm(const Register & rd,const Register & rn,unsigned immr,unsigned imms)1788 void Sbfm(const Register& rd,
1789 const Register& rn,
1790 unsigned immr,
1791 unsigned imms) {
1792 VIXL_ASSERT(allow_macro_instructions_);
1793 VIXL_ASSERT(!rd.IsZero());
1794 VIXL_ASSERT(!rn.IsZero());
1795 SingleEmissionCheckScope guard(this);
1796 sbfm(rd, rn, immr, imms);
1797 }
Sbfx(const Register & rd,const Register & rn,unsigned lsb,unsigned width)1798 void Sbfx(const Register& rd,
1799 const Register& rn,
1800 unsigned lsb,
1801 unsigned width) {
1802 VIXL_ASSERT(allow_macro_instructions_);
1803 VIXL_ASSERT(!rd.IsZero());
1804 VIXL_ASSERT(!rn.IsZero());
1805 SingleEmissionCheckScope guard(this);
1806 sbfx(rd, rn, lsb, width);
1807 }
1808 void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
1809 VIXL_ASSERT(allow_macro_instructions_);
1810 VIXL_ASSERT(!rn.IsZero());
1811 SingleEmissionCheckScope guard(this);
1812 scvtf(vd, rn, fbits);
1813 }
Sdiv(const Register & rd,const Register & rn,const Register & rm)1814 void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
1815 VIXL_ASSERT(allow_macro_instructions_);
1816 VIXL_ASSERT(!rd.IsZero());
1817 VIXL_ASSERT(!rn.IsZero());
1818 VIXL_ASSERT(!rm.IsZero());
1819 SingleEmissionCheckScope guard(this);
1820 sdiv(rd, rn, rm);
1821 }
Smaddl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)1822 void Smaddl(const Register& rd,
1823 const Register& rn,
1824 const Register& rm,
1825 const Register& ra) {
1826 VIXL_ASSERT(allow_macro_instructions_);
1827 VIXL_ASSERT(!rd.IsZero());
1828 VIXL_ASSERT(!rn.IsZero());
1829 VIXL_ASSERT(!rm.IsZero());
1830 VIXL_ASSERT(!ra.IsZero());
1831 SingleEmissionCheckScope guard(this);
1832 smaddl(rd, rn, rm, ra);
1833 }
Smsubl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)1834 void Smsubl(const Register& rd,
1835 const Register& rn,
1836 const Register& rm,
1837 const Register& ra) {
1838 VIXL_ASSERT(allow_macro_instructions_);
1839 VIXL_ASSERT(!rd.IsZero());
1840 VIXL_ASSERT(!rn.IsZero());
1841 VIXL_ASSERT(!rm.IsZero());
1842 VIXL_ASSERT(!ra.IsZero());
1843 SingleEmissionCheckScope guard(this);
1844 smsubl(rd, rn, rm, ra);
1845 }
Smull(const Register & rd,const Register & rn,const Register & rm)1846 void Smull(const Register& rd, const Register& rn, const Register& rm) {
1847 VIXL_ASSERT(allow_macro_instructions_);
1848 VIXL_ASSERT(!rd.IsZero());
1849 VIXL_ASSERT(!rn.IsZero());
1850 VIXL_ASSERT(!rm.IsZero());
1851 SingleEmissionCheckScope guard(this);
1852 smull(rd, rn, rm);
1853 }
Smulh(const Register & xd,const Register & xn,const Register & xm)1854 void Smulh(const Register& xd, const Register& xn, const Register& xm) {
1855 VIXL_ASSERT(allow_macro_instructions_);
1856 VIXL_ASSERT(!xd.IsZero());
1857 VIXL_ASSERT(!xn.IsZero());
1858 VIXL_ASSERT(!xm.IsZero());
1859 SingleEmissionCheckScope guard(this);
1860 smulh(xd, xn, xm);
1861 }
Stlr(const Register & rt,const MemOperand & dst)1862 void Stlr(const Register& rt, const MemOperand& dst) {
1863 VIXL_ASSERT(allow_macro_instructions_);
1864 SingleEmissionCheckScope guard(this);
1865 stlr(rt, dst);
1866 }
Stlrb(const Register & rt,const MemOperand & dst)1867 void Stlrb(const Register& rt, const MemOperand& dst) {
1868 VIXL_ASSERT(allow_macro_instructions_);
1869 SingleEmissionCheckScope guard(this);
1870 stlrb(rt, dst);
1871 }
Stlrh(const Register & rt,const MemOperand & dst)1872 void Stlrh(const Register& rt, const MemOperand& dst) {
1873 VIXL_ASSERT(allow_macro_instructions_);
1874 SingleEmissionCheckScope guard(this);
1875 stlrh(rt, dst);
1876 }
Stlxp(const Register & rs,const Register & rt,const Register & rt2,const MemOperand & dst)1877 void Stlxp(const Register& rs,
1878 const Register& rt,
1879 const Register& rt2,
1880 const MemOperand& dst) {
1881 VIXL_ASSERT(allow_macro_instructions_);
1882 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1883 VIXL_ASSERT(!rs.Aliases(rt));
1884 VIXL_ASSERT(!rs.Aliases(rt2));
1885 SingleEmissionCheckScope guard(this);
1886 stlxp(rs, rt, rt2, dst);
1887 }
Stlxr(const Register & rs,const Register & rt,const MemOperand & dst)1888 void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
1889 VIXL_ASSERT(allow_macro_instructions_);
1890 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1891 VIXL_ASSERT(!rs.Aliases(rt));
1892 SingleEmissionCheckScope guard(this);
1893 stlxr(rs, rt, dst);
1894 }
Stlxrb(const Register & rs,const Register & rt,const MemOperand & dst)1895 void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
1896 VIXL_ASSERT(allow_macro_instructions_);
1897 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1898 VIXL_ASSERT(!rs.Aliases(rt));
1899 SingleEmissionCheckScope guard(this);
1900 stlxrb(rs, rt, dst);
1901 }
Stlxrh(const Register & rs,const Register & rt,const MemOperand & dst)1902 void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
1903 VIXL_ASSERT(allow_macro_instructions_);
1904 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1905 VIXL_ASSERT(!rs.Aliases(rt));
1906 SingleEmissionCheckScope guard(this);
1907 stlxrh(rs, rt, dst);
1908 }
Stnp(const CPURegister & rt,const CPURegister & rt2,const MemOperand & dst)1909 void Stnp(const CPURegister& rt,
1910 const CPURegister& rt2,
1911 const MemOperand& dst) {
1912 VIXL_ASSERT(allow_macro_instructions_);
1913 SingleEmissionCheckScope guard(this);
1914 stnp(rt, rt2, dst);
1915 }
Stxp(const Register & rs,const Register & rt,const Register & rt2,const MemOperand & dst)1916 void Stxp(const Register& rs,
1917 const Register& rt,
1918 const Register& rt2,
1919 const MemOperand& dst) {
1920 VIXL_ASSERT(allow_macro_instructions_);
1921 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1922 VIXL_ASSERT(!rs.Aliases(rt));
1923 VIXL_ASSERT(!rs.Aliases(rt2));
1924 SingleEmissionCheckScope guard(this);
1925 stxp(rs, rt, rt2, dst);
1926 }
Stxr(const Register & rs,const Register & rt,const MemOperand & dst)1927 void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
1928 VIXL_ASSERT(allow_macro_instructions_);
1929 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1930 VIXL_ASSERT(!rs.Aliases(rt));
1931 SingleEmissionCheckScope guard(this);
1932 stxr(rs, rt, dst);
1933 }
Stxrb(const Register & rs,const Register & rt,const MemOperand & dst)1934 void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
1935 VIXL_ASSERT(allow_macro_instructions_);
1936 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1937 VIXL_ASSERT(!rs.Aliases(rt));
1938 SingleEmissionCheckScope guard(this);
1939 stxrb(rs, rt, dst);
1940 }
Stxrh(const Register & rs,const Register & rt,const MemOperand & dst)1941 void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
1942 VIXL_ASSERT(allow_macro_instructions_);
1943 VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
1944 VIXL_ASSERT(!rs.Aliases(rt));
1945 SingleEmissionCheckScope guard(this);
1946 stxrh(rs, rt, dst);
1947 }
Svc(int code)1948 void Svc(int code) {
1949 VIXL_ASSERT(allow_macro_instructions_);
1950 SingleEmissionCheckScope guard(this);
1951 svc(code);
1952 }
Sxtb(const Register & rd,const Register & rn)1953 void Sxtb(const Register& rd, const Register& rn) {
1954 VIXL_ASSERT(allow_macro_instructions_);
1955 VIXL_ASSERT(!rd.IsZero());
1956 VIXL_ASSERT(!rn.IsZero());
1957 SingleEmissionCheckScope guard(this);
1958 sxtb(rd, rn);
1959 }
Sxth(const Register & rd,const Register & rn)1960 void Sxth(const Register& rd, const Register& rn) {
1961 VIXL_ASSERT(allow_macro_instructions_);
1962 VIXL_ASSERT(!rd.IsZero());
1963 VIXL_ASSERT(!rn.IsZero());
1964 SingleEmissionCheckScope guard(this);
1965 sxth(rd, rn);
1966 }
Sxtw(const Register & rd,const Register & rn)1967 void Sxtw(const Register& rd, const Register& rn) {
1968 VIXL_ASSERT(allow_macro_instructions_);
1969 VIXL_ASSERT(!rd.IsZero());
1970 VIXL_ASSERT(!rn.IsZero());
1971 SingleEmissionCheckScope guard(this);
1972 sxtw(rd, rn);
1973 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vm)1974 void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
1975 VIXL_ASSERT(allow_macro_instructions_);
1976 SingleEmissionCheckScope guard(this);
1977 tbl(vd, vn, vm);
1978 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vm)1979 void Tbl(const VRegister& vd,
1980 const VRegister& vn,
1981 const VRegister& vn2,
1982 const VRegister& vm) {
1983 VIXL_ASSERT(allow_macro_instructions_);
1984 SingleEmissionCheckScope guard(this);
1985 tbl(vd, vn, vn2, vm);
1986 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vm)1987 void Tbl(const VRegister& vd,
1988 const VRegister& vn,
1989 const VRegister& vn2,
1990 const VRegister& vn3,
1991 const VRegister& vm) {
1992 VIXL_ASSERT(allow_macro_instructions_);
1993 SingleEmissionCheckScope guard(this);
1994 tbl(vd, vn, vn2, vn3, vm);
1995 }
Tbl(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vn4,const VRegister & vm)1996 void Tbl(const VRegister& vd,
1997 const VRegister& vn,
1998 const VRegister& vn2,
1999 const VRegister& vn3,
2000 const VRegister& vn4,
2001 const VRegister& vm) {
2002 VIXL_ASSERT(allow_macro_instructions_);
2003 SingleEmissionCheckScope guard(this);
2004 tbl(vd, vn, vn2, vn3, vn4, vm);
2005 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vm)2006 void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
2007 VIXL_ASSERT(allow_macro_instructions_);
2008 SingleEmissionCheckScope guard(this);
2009 tbx(vd, vn, vm);
2010 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vm)2011 void Tbx(const VRegister& vd,
2012 const VRegister& vn,
2013 const VRegister& vn2,
2014 const VRegister& vm) {
2015 VIXL_ASSERT(allow_macro_instructions_);
2016 SingleEmissionCheckScope guard(this);
2017 tbx(vd, vn, vn2, vm);
2018 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vm)2019 void Tbx(const VRegister& vd,
2020 const VRegister& vn,
2021 const VRegister& vn2,
2022 const VRegister& vn3,
2023 const VRegister& vm) {
2024 VIXL_ASSERT(allow_macro_instructions_);
2025 SingleEmissionCheckScope guard(this);
2026 tbx(vd, vn, vn2, vn3, vm);
2027 }
Tbx(const VRegister & vd,const VRegister & vn,const VRegister & vn2,const VRegister & vn3,const VRegister & vn4,const VRegister & vm)2028 void Tbx(const VRegister& vd,
2029 const VRegister& vn,
2030 const VRegister& vn2,
2031 const VRegister& vn3,
2032 const VRegister& vn4,
2033 const VRegister& vm) {
2034 VIXL_ASSERT(allow_macro_instructions_);
2035 SingleEmissionCheckScope guard(this);
2036 tbx(vd, vn, vn2, vn3, vn4, vm);
2037 }
2038 void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
2039 void Tbz(const Register& rt, unsigned bit_pos, Label* label);
Ubfiz(const Register & rd,const Register & rn,unsigned lsb,unsigned width)2040 void Ubfiz(const Register& rd,
2041 const Register& rn,
2042 unsigned lsb,
2043 unsigned width) {
2044 VIXL_ASSERT(allow_macro_instructions_);
2045 VIXL_ASSERT(!rd.IsZero());
2046 VIXL_ASSERT(!rn.IsZero());
2047 SingleEmissionCheckScope guard(this);
2048 ubfiz(rd, rn, lsb, width);
2049 }
Ubfm(const Register & rd,const Register & rn,unsigned immr,unsigned imms)2050 void Ubfm(const Register& rd,
2051 const Register& rn,
2052 unsigned immr,
2053 unsigned imms) {
2054 VIXL_ASSERT(allow_macro_instructions_);
2055 VIXL_ASSERT(!rd.IsZero());
2056 VIXL_ASSERT(!rn.IsZero());
2057 SingleEmissionCheckScope guard(this);
2058 ubfm(rd, rn, immr, imms);
2059 }
Ubfx(const Register & rd,const Register & rn,unsigned lsb,unsigned width)2060 void Ubfx(const Register& rd,
2061 const Register& rn,
2062 unsigned lsb,
2063 unsigned width) {
2064 VIXL_ASSERT(allow_macro_instructions_);
2065 VIXL_ASSERT(!rd.IsZero());
2066 VIXL_ASSERT(!rn.IsZero());
2067 SingleEmissionCheckScope guard(this);
2068 ubfx(rd, rn, lsb, width);
2069 }
2070 void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
2071 VIXL_ASSERT(allow_macro_instructions_);
2072 VIXL_ASSERT(!rn.IsZero());
2073 SingleEmissionCheckScope guard(this);
2074 ucvtf(vd, rn, fbits);
2075 }
Udiv(const Register & rd,const Register & rn,const Register & rm)2076 void Udiv(const Register& rd, const Register& rn, const Register& rm) {
2077 VIXL_ASSERT(allow_macro_instructions_);
2078 VIXL_ASSERT(!rd.IsZero());
2079 VIXL_ASSERT(!rn.IsZero());
2080 VIXL_ASSERT(!rm.IsZero());
2081 SingleEmissionCheckScope guard(this);
2082 udiv(rd, rn, rm);
2083 }
Umaddl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)2084 void Umaddl(const Register& rd,
2085 const Register& rn,
2086 const Register& rm,
2087 const Register& ra) {
2088 VIXL_ASSERT(allow_macro_instructions_);
2089 VIXL_ASSERT(!rd.IsZero());
2090 VIXL_ASSERT(!rn.IsZero());
2091 VIXL_ASSERT(!rm.IsZero());
2092 VIXL_ASSERT(!ra.IsZero());
2093 SingleEmissionCheckScope guard(this);
2094 umaddl(rd, rn, rm, ra);
2095 }
Umull(const Register & rd,const Register & rn,const Register & rm)2096 void Umull(const Register& rd, const Register& rn, const Register& rm) {
2097 VIXL_ASSERT(allow_macro_instructions_);
2098 VIXL_ASSERT(!rd.IsZero());
2099 VIXL_ASSERT(!rn.IsZero());
2100 VIXL_ASSERT(!rm.IsZero());
2101 SingleEmissionCheckScope guard(this);
2102 umull(rd, rn, rm);
2103 }
Umulh(const Register & xd,const Register & xn,const Register & xm)2104 void Umulh(const Register& xd, const Register& xn, const Register& xm) {
2105 VIXL_ASSERT(allow_macro_instructions_);
2106 VIXL_ASSERT(!xd.IsZero());
2107 VIXL_ASSERT(!xn.IsZero());
2108 VIXL_ASSERT(!xm.IsZero());
2109 SingleEmissionCheckScope guard(this);
2110 umulh(xd, xn, xm);
2111 }
Umsubl(const Register & rd,const Register & rn,const Register & rm,const Register & ra)2112 void Umsubl(const Register& rd,
2113 const Register& rn,
2114 const Register& rm,
2115 const Register& ra) {
2116 VIXL_ASSERT(allow_macro_instructions_);
2117 VIXL_ASSERT(!rd.IsZero());
2118 VIXL_ASSERT(!rn.IsZero());
2119 VIXL_ASSERT(!rm.IsZero());
2120 VIXL_ASSERT(!ra.IsZero());
2121 SingleEmissionCheckScope guard(this);
2122 umsubl(rd, rn, rm, ra);
2123 }
Unreachable()2124 void Unreachable() {
2125 VIXL_ASSERT(allow_macro_instructions_);
2126 SingleEmissionCheckScope guard(this);
2127 if (generate_simulator_code_) {
2128 hlt(kUnreachableOpcode);
2129 } else {
2130 // Branch to 0 to generate a segfault.
2131 // lr - kInstructionSize is the address of the offending instruction.
2132 blr(xzr);
2133 }
2134 }
Uxtb(const Register & rd,const Register & rn)2135 void Uxtb(const Register& rd, const Register& rn) {
2136 VIXL_ASSERT(allow_macro_instructions_);
2137 VIXL_ASSERT(!rd.IsZero());
2138 VIXL_ASSERT(!rn.IsZero());
2139 SingleEmissionCheckScope guard(this);
2140 uxtb(rd, rn);
2141 }
Uxth(const Register & rd,const Register & rn)2142 void Uxth(const Register& rd, const Register& rn) {
2143 VIXL_ASSERT(allow_macro_instructions_);
2144 VIXL_ASSERT(!rd.IsZero());
2145 VIXL_ASSERT(!rn.IsZero());
2146 SingleEmissionCheckScope guard(this);
2147 uxth(rd, rn);
2148 }
Uxtw(const Register & rd,const Register & rn)2149 void Uxtw(const Register& rd, const Register& rn) {
2150 VIXL_ASSERT(allow_macro_instructions_);
2151 VIXL_ASSERT(!rd.IsZero());
2152 VIXL_ASSERT(!rn.IsZero());
2153 SingleEmissionCheckScope guard(this);
2154 uxtw(rd, rn);
2155 }
2156
2157 // NEON 3 vector register instructions.
2158 #define NEON_3VREG_MACRO_LIST(V) \
2159 V(add, Add) \
2160 V(addhn, Addhn) \
2161 V(addhn2, Addhn2) \
2162 V(addp, Addp) \
2163 V(and_, And) \
2164 V(bic, Bic) \
2165 V(bif, Bif) \
2166 V(bit, Bit) \
2167 V(bsl, Bsl) \
2168 V(cmeq, Cmeq) \
2169 V(cmge, Cmge) \
2170 V(cmgt, Cmgt) \
2171 V(cmhi, Cmhi) \
2172 V(cmhs, Cmhs) \
2173 V(cmtst, Cmtst) \
2174 V(eor, Eor) \
2175 V(fabd, Fabd) \
2176 V(facge, Facge) \
2177 V(facgt, Facgt) \
2178 V(faddp, Faddp) \
2179 V(fcmeq, Fcmeq) \
2180 V(fcmge, Fcmge) \
2181 V(fcmgt, Fcmgt) \
2182 V(fmaxnmp, Fmaxnmp) \
2183 V(fmaxp, Fmaxp) \
2184 V(fminnmp, Fminnmp) \
2185 V(fminp, Fminp) \
2186 V(fmla, Fmla) \
2187 V(fmls, Fmls) \
2188 V(fmulx, Fmulx) \
2189 V(frecps, Frecps) \
2190 V(frsqrts, Frsqrts) \
2191 V(mla, Mla) \
2192 V(mls, Mls) \
2193 V(mul, Mul) \
2194 V(orn, Orn) \
2195 V(orr, Orr) \
2196 V(pmul, Pmul) \
2197 V(pmull, Pmull) \
2198 V(pmull2, Pmull2) \
2199 V(raddhn, Raddhn) \
2200 V(raddhn2, Raddhn2) \
2201 V(rsubhn, Rsubhn) \
2202 V(rsubhn2, Rsubhn2) \
2203 V(saba, Saba) \
2204 V(sabal, Sabal) \
2205 V(sabal2, Sabal2) \
2206 V(sabd, Sabd) \
2207 V(sabdl, Sabdl) \
2208 V(sabdl2, Sabdl2) \
2209 V(saddl, Saddl) \
2210 V(saddl2, Saddl2) \
2211 V(saddw, Saddw) \
2212 V(saddw2, Saddw2) \
2213 V(shadd, Shadd) \
2214 V(shsub, Shsub) \
2215 V(smax, Smax) \
2216 V(smaxp, Smaxp) \
2217 V(smin, Smin) \
2218 V(sminp, Sminp) \
2219 V(smlal, Smlal) \
2220 V(smlal2, Smlal2) \
2221 V(smlsl, Smlsl) \
2222 V(smlsl2, Smlsl2) \
2223 V(smull, Smull) \
2224 V(smull2, Smull2) \
2225 V(sqadd, Sqadd) \
2226 V(sqdmlal, Sqdmlal) \
2227 V(sqdmlal2, Sqdmlal2) \
2228 V(sqdmlsl, Sqdmlsl) \
2229 V(sqdmlsl2, Sqdmlsl2) \
2230 V(sqdmulh, Sqdmulh) \
2231 V(sqdmull, Sqdmull) \
2232 V(sqdmull2, Sqdmull2) \
2233 V(sqrdmulh, Sqrdmulh) \
2234 V(sqrshl, Sqrshl) \
2235 V(sqshl, Sqshl) \
2236 V(sqsub, Sqsub) \
2237 V(srhadd, Srhadd) \
2238 V(srshl, Srshl) \
2239 V(sshl, Sshl) \
2240 V(ssubl, Ssubl) \
2241 V(ssubl2, Ssubl2) \
2242 V(ssubw, Ssubw) \
2243 V(ssubw2, Ssubw2) \
2244 V(sub, Sub) \
2245 V(subhn, Subhn) \
2246 V(subhn2, Subhn2) \
2247 V(trn1, Trn1) \
2248 V(trn2, Trn2) \
2249 V(uaba, Uaba) \
2250 V(uabal, Uabal) \
2251 V(uabal2, Uabal2) \
2252 V(uabd, Uabd) \
2253 V(uabdl, Uabdl) \
2254 V(uabdl2, Uabdl2) \
2255 V(uaddl, Uaddl) \
2256 V(uaddl2, Uaddl2) \
2257 V(uaddw, Uaddw) \
2258 V(uaddw2, Uaddw2) \
2259 V(uhadd, Uhadd) \
2260 V(uhsub, Uhsub) \
2261 V(umax, Umax) \
2262 V(umaxp, Umaxp) \
2263 V(umin, Umin) \
2264 V(uminp, Uminp) \
2265 V(umlal, Umlal) \
2266 V(umlal2, Umlal2) \
2267 V(umlsl, Umlsl) \
2268 V(umlsl2, Umlsl2) \
2269 V(umull, Umull) \
2270 V(umull2, Umull2) \
2271 V(uqadd, Uqadd) \
2272 V(uqrshl, Uqrshl) \
2273 V(uqshl, Uqshl) \
2274 V(uqsub, Uqsub) \
2275 V(urhadd, Urhadd) \
2276 V(urshl, Urshl) \
2277 V(ushl, Ushl) \
2278 V(usubl, Usubl) \
2279 V(usubl2, Usubl2) \
2280 V(usubw, Usubw) \
2281 V(usubw2, Usubw2) \
2282 V(uzp1, Uzp1) \
2283 V(uzp2, Uzp2) \
2284 V(zip1, Zip1) \
2285 V(zip2, Zip2)
2286
2287 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2288 void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
2289 VIXL_ASSERT(allow_macro_instructions_); \
2290 SingleEmissionCheckScope guard(this); \
2291 ASM(vd, vn, vm); \
2292 }
2293 NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2294 #undef DEFINE_MACRO_ASM_FUNC
2295
2296 // NEON 2 vector register instructions.
2297 #define NEON_2VREG_MACRO_LIST(V) \
2298 V(abs, Abs) \
2299 V(addp, Addp) \
2300 V(addv, Addv) \
2301 V(cls, Cls) \
2302 V(clz, Clz) \
2303 V(cnt, Cnt) \
2304 V(fabs, Fabs) \
2305 V(faddp, Faddp) \
2306 V(fcvtas, Fcvtas) \
2307 V(fcvtau, Fcvtau) \
2308 V(fcvtms, Fcvtms) \
2309 V(fcvtmu, Fcvtmu) \
2310 V(fcvtns, Fcvtns) \
2311 V(fcvtnu, Fcvtnu) \
2312 V(fcvtps, Fcvtps) \
2313 V(fcvtpu, Fcvtpu) \
2314 V(fmaxnmp, Fmaxnmp) \
2315 V(fmaxnmv, Fmaxnmv) \
2316 V(fmaxp, Fmaxp) \
2317 V(fmaxv, Fmaxv) \
2318 V(fminnmp, Fminnmp) \
2319 V(fminnmv, Fminnmv) \
2320 V(fminp, Fminp) \
2321 V(fminv, Fminv) \
2322 V(fneg, Fneg) \
2323 V(frecpe, Frecpe) \
2324 V(frecpx, Frecpx) \
2325 V(frinta, Frinta) \
2326 V(frinti, Frinti) \
2327 V(frintm, Frintm) \
2328 V(frintn, Frintn) \
2329 V(frintp, Frintp) \
2330 V(frintx, Frintx) \
2331 V(frintz, Frintz) \
2332 V(frsqrte, Frsqrte) \
2333 V(fsqrt, Fsqrt) \
2334 V(mov, Mov) \
2335 V(mvn, Mvn) \
2336 V(neg, Neg) \
2337 V(not_, Not) \
2338 V(rbit, Rbit) \
2339 V(rev16, Rev16) \
2340 V(rev32, Rev32) \
2341 V(rev64, Rev64) \
2342 V(sadalp, Sadalp) \
2343 V(saddlp, Saddlp) \
2344 V(saddlv, Saddlv) \
2345 V(smaxv, Smaxv) \
2346 V(sminv, Sminv) \
2347 V(sqabs, Sqabs) \
2348 V(sqneg, Sqneg) \
2349 V(sqxtn, Sqxtn) \
2350 V(sqxtn2, Sqxtn2) \
2351 V(sqxtun, Sqxtun) \
2352 V(sqxtun2, Sqxtun2) \
2353 V(suqadd, Suqadd) \
2354 V(sxtl, Sxtl) \
2355 V(sxtl2, Sxtl2) \
2356 V(uadalp, Uadalp) \
2357 V(uaddlp, Uaddlp) \
2358 V(uaddlv, Uaddlv) \
2359 V(umaxv, Umaxv) \
2360 V(uminv, Uminv) \
2361 V(uqxtn, Uqxtn) \
2362 V(uqxtn2, Uqxtn2) \
2363 V(urecpe, Urecpe) \
2364 V(ursqrte, Ursqrte) \
2365 V(usqadd, Usqadd) \
2366 V(uxtl, Uxtl) \
2367 V(uxtl2, Uxtl2) \
2368 V(xtn, Xtn) \
2369 V(xtn2, Xtn2)
2370
2371 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2372 void MASM(const VRegister& vd, const VRegister& vn) { \
2373 VIXL_ASSERT(allow_macro_instructions_); \
2374 SingleEmissionCheckScope guard(this); \
2375 ASM(vd, vn); \
2376 }
NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)2377 NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2378 #undef DEFINE_MACRO_ASM_FUNC
2379
2380 // NEON 2 vector register with immediate instructions.
2381 #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
2382 V(fcmeq, Fcmeq) \
2383 V(fcmge, Fcmge) \
2384 V(fcmgt, Fcmgt) \
2385 V(fcmle, Fcmle) \
2386 V(fcmlt, Fcmlt)
2387
2388 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2389 void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
2390 VIXL_ASSERT(allow_macro_instructions_); \
2391 SingleEmissionCheckScope guard(this); \
2392 ASM(vd, vn, imm); \
2393 }
2394 NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2395 #undef DEFINE_MACRO_ASM_FUNC
2396
2397 // NEON by element instructions.
2398 #define NEON_BYELEMENT_MACRO_LIST(V) \
2399 V(fmul, Fmul) \
2400 V(fmla, Fmla) \
2401 V(fmls, Fmls) \
2402 V(fmulx, Fmulx) \
2403 V(mul, Mul) \
2404 V(mla, Mla) \
2405 V(mls, Mls) \
2406 V(sqdmulh, Sqdmulh) \
2407 V(sqrdmulh, Sqrdmulh) \
2408 V(sqdmull, Sqdmull) \
2409 V(sqdmull2, Sqdmull2) \
2410 V(sqdmlal, Sqdmlal) \
2411 V(sqdmlal2, Sqdmlal2) \
2412 V(sqdmlsl, Sqdmlsl) \
2413 V(sqdmlsl2, Sqdmlsl2) \
2414 V(smull, Smull) \
2415 V(smull2, Smull2) \
2416 V(smlal, Smlal) \
2417 V(smlal2, Smlal2) \
2418 V(smlsl, Smlsl) \
2419 V(smlsl2, Smlsl2) \
2420 V(umull, Umull) \
2421 V(umull2, Umull2) \
2422 V(umlal, Umlal) \
2423 V(umlal2, Umlal2) \
2424 V(umlsl, Umlsl) \
2425 V(umlsl2, Umlsl2)
2426
2427 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2428 void MASM(const VRegister& vd, \
2429 const VRegister& vn, \
2430 const VRegister& vm, \
2431 int vm_index) { \
2432 VIXL_ASSERT(allow_macro_instructions_); \
2433 SingleEmissionCheckScope guard(this); \
2434 ASM(vd, vn, vm, vm_index); \
2435 }
2436 NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2437 #undef DEFINE_MACRO_ASM_FUNC
2438
2439 #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
2440 V(rshrn, Rshrn) \
2441 V(rshrn2, Rshrn2) \
2442 V(shl, Shl) \
2443 V(shll, Shll) \
2444 V(shll2, Shll2) \
2445 V(shrn, Shrn) \
2446 V(shrn2, Shrn2) \
2447 V(sli, Sli) \
2448 V(sqrshrn, Sqrshrn) \
2449 V(sqrshrn2, Sqrshrn2) \
2450 V(sqrshrun, Sqrshrun) \
2451 V(sqrshrun2, Sqrshrun2) \
2452 V(sqshl, Sqshl) \
2453 V(sqshlu, Sqshlu) \
2454 V(sqshrn, Sqshrn) \
2455 V(sqshrn2, Sqshrn2) \
2456 V(sqshrun, Sqshrun) \
2457 V(sqshrun2, Sqshrun2) \
2458 V(sri, Sri) \
2459 V(srshr, Srshr) \
2460 V(srsra, Srsra) \
2461 V(sshll, Sshll) \
2462 V(sshll2, Sshll2) \
2463 V(sshr, Sshr) \
2464 V(ssra, Ssra) \
2465 V(uqrshrn, Uqrshrn) \
2466 V(uqrshrn2, Uqrshrn2) \
2467 V(uqshl, Uqshl) \
2468 V(uqshrn, Uqshrn) \
2469 V(uqshrn2, Uqshrn2) \
2470 V(urshr, Urshr) \
2471 V(ursra, Ursra) \
2472 V(ushll, Ushll) \
2473 V(ushll2, Ushll2) \
2474 V(ushr, Ushr) \
2475 V(usra, Usra)
2476
2477 #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
2478 void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
2479 VIXL_ASSERT(allow_macro_instructions_); \
2480 SingleEmissionCheckScope guard(this); \
2481 ASM(vd, vn, shift); \
2482 }
2483 NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
2484 #undef DEFINE_MACRO_ASM_FUNC
2485
2486 void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
2487 VIXL_ASSERT(allow_macro_instructions_);
2488 SingleEmissionCheckScope guard(this);
2489 bic(vd, imm8, left_shift);
2490 }
  // NEON compare-against-immediate wrappers. Each asserts that macro
  // instructions are permitted, reserves space for a single instruction,
  // and emits the corresponding comparison of `vn` against `imm`.
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmeq(vd, vn, imm);
  }
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmge(vd, vn, imm);
  }
  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmgt(vd, vn, imm);
  }
  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmle(vd, vn, imm);
  }
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmlt(vd, vn, imm);
  }
  // DUP wrappers: duplicate either lane `index` of `vn`, or the general
  // register `rn`, across all lanes of `vd`. One instruction each.
  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dup(vd, vn, index);
  }
  void Dup(const VRegister& vd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dup(vd, rn);
  }
  // Emit a single EXT (vector extract) instruction.
  void Ext(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vm,
           int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ext(vd, vn, vm, index);
  }
  // INS wrappers: insert into lane `vd_index` from either lane `vn_index`
  // of `vn`, or from the general register `rn`. One instruction each.
  void Ins(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, vn, vn_index);
  }
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, rn);
  }
  // LD1/LD1R wrappers: load to one, two, three or four registers, to a
  // single lane, or load-and-replicate (ld1r). One instruction each.
  void Ld1(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, src);
  }
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, src);
  }
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, src);
  }
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, vt4, src);
  }
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, lane, src);
  }
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1r(vt, src);
  }
  // LD2/LD2R wrappers: two-register structure load, single-lane variant,
  // and load-and-replicate (ld2r). One instruction each.
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, src);
  }
  void Ld2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, lane, src);
  }
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2r(vt, vt2, src);
  }
  // LD3/LD3R wrappers: three-register structure load, single-lane variant,
  // and load-and-replicate (ld3r). One instruction each.
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, src);
  }
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, lane, src);
  }
  void Ld3r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3r(vt, vt2, vt3, src);
  }
  // LD4/LD4R wrappers: four-register structure load, single-lane variant,
  // and load-and-replicate (ld4r). One instruction each.
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, src);
  }
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  void Ld4r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const VRegister& vt4,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4r(vt, vt2, vt3, vt4, src);
  }
  // MOV (element / general-register) wrappers, one instruction each:
  // lane-to-lane, lane-to-scalar, general-to-lane, and lane-to-general.
  void Mov(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, vn, vn_index);
  }
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vn, index);
  }
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, rn);
  }
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(rd, vn, vn_index);
  }
  // Synthesize a vector immediate move; may emit more than one instruction
  // (defined out of line). The two-argument form sets a 128-bit value from
  // two 64-bit halves.
  void Movi(const VRegister& vd,
            uint64_t imm,
            Shift shift = LSL,
            int shift_amount = 0);
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
  // Emit a single MVNI (vector, immediate) instruction.
  void Mvni(const VRegister& vd,
            const int imm8,
            Shift shift = LSL,
            const int shift_amount = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mvni(vd, imm8, shift, shift_amount);
  }
  // Emit a single ORR (vector, immediate) instruction.
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    orr(vd, imm8, left_shift);
  }
  // Fixed-point <-> floating-point conversion wrappers; `fbits` is the
  // number of fractional bits (0 means integer conversion). One
  // instruction each.
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    scvtf(vd, vn, fbits);
  }
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, vn, fbits);
  }
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzs(vd, vn, fbits);
  }
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzu(vd, vn, fbits);
  }
  // ST1 wrappers: store from one, two, three or four registers, or a
  // single lane. One instruction each.
  void St1(const VRegister& vt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, dst);
  }
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, dst);
  }
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, vt4, dst);
  }
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, lane, dst);
  }
  // ST2/ST3/ST4 wrappers: multi-register structure stores, followed by
  // their single-lane variants. One instruction each.
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, dst);
  }
  void St2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, lane, dst);
  }
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, lane, dst);
  }
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  // Move a vector lane to a general register, sign-extended (smov) or
  // zero-extended (umov). One instruction each.
  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    smov(rd, vn, vn_index);
  }
  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    umov(rd, vn, vn_index);
  }
  // CRC32 checksum wrappers (byte/halfword/word/doubleword, plus the
  // CRC32C "Castagnoli" variants). One instruction each.
  void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32b(rd, rn, rm);
  }
  void Crc32h(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32h(rd, rn, rm);
  }
  void Crc32w(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32w(rd, rn, rm);
  }
  void Crc32x(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32x(rd, rn, rm);
  }
  void Crc32cb(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cb(rd, rn, rm);
  }
  void Crc32ch(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32ch(rd, rn, rm);
  }
  void Crc32cw(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cw(rd, rn, rm);
  }
  void Crc32cx(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cx(rd, rn, rm);
  }
2847
  // Allocate a literal whose lifetime is owned by the literal pool: the
  // raw `new` is intentional, as kDeletedOnPoolDestruction makes the pool
  // delete the literal when the pool itself is destroyed.
  template <typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T value) {
    return new Literal<T>(value,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }

  // As above, but for a 128-bit literal built from two 64-bit halves.
  template <typename T>
  Literal<T>* CreateLiteralDestroyedWithPool(T high64, T low64) {
    return new Literal<T>(high64,
                          low64,
                          &literal_pool_,
                          RawLiteral::kDeletedOnPoolDestruction);
  }
2862
  // Push the system stack pointer (sp) down to allow the same to be done to
  // the current stack pointer (according to StackPointer()). This must be
  // called _before_ accessing the memory.
  //
  // This is necessary when pushing or otherwise adding things to the stack, to
  // satisfy the AAPCS64 constraint that the memory below the system stack
  // pointer is not accessed.
  //
  // This method asserts that StackPointer() is not sp, since the call does
  // not make sense in that context.
  //
  // TODO: This method can only accept values of 'space' that can be encoded
  // in one instruction. Refer to the implementation for details.
  void BumpSystemStackPointer(const Operand& space);
2877
  // Whether macro (multi-instruction) sequences may currently be emitted.
  virtual bool AllowMacroInstructions() const VIXL_OVERRIDE {
    return allow_macro_instructions_;
  }

  // True only when both the literal pool and the veneer pool are blocked.
  virtual bool ArePoolsBlocked() const VIXL_OVERRIDE {
    return IsLiteralPoolBlocked() && IsVeneerPoolBlocked();
  }

  // Select between simulator-targeted and native code generation.
  void SetGenerateSimulatorCode(bool value) {
    generate_simulator_code_ = value;
  }

  bool GenerateSimulatorCode() const { return generate_simulator_code_; }
2891
  // Current and maximum pool sizes, with deprecated aliases kept for
  // backwards compatibility.
  size_t GetLiteralPoolSize() const { return literal_pool_.GetSize(); }
  VIXL_DEPRECATED("GetLiteralPoolSize", size_t LiteralPoolSize() const) {
    return GetLiteralPoolSize();
  }

  size_t GetLiteralPoolMaxSize() const { return literal_pool_.GetMaxSize(); }
  VIXL_DEPRECATED("GetLiteralPoolMaxSize", size_t LiteralPoolMaxSize() const) {
    return GetLiteralPoolMaxSize();
  }

  size_t GetVeneerPoolMaxSize() const { return veneer_pool_.GetMaxSize(); }
  VIXL_DEPRECATED("GetVeneerPoolMaxSize", size_t VeneerPoolMaxSize() const) {
    return GetVeneerPoolMaxSize();
  }
2906
  // The number of unresolved branches that may require a veneer.
  int GetNumberOfPotentialVeneers() const {
    return veneer_pool_.GetNumberOfPotentialVeneers();
  }
  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
                  int NumberOfPotentialVeneers() const) {
    return GetNumberOfPotentialVeneers();
  }
2915
  // The earliest buffer offset at which some maintenance is required: the
  // sooner of the two pools' checkpoints, clamped to the buffer capacity.
  ptrdiff_t GetNextCheckPoint() const {
    ptrdiff_t next_checkpoint_for_pools =
        std::min(literal_pool_.GetCheckpoint(), veneer_pool_.GetCheckpoint());
    return std::min(next_checkpoint_for_pools,
                    static_cast<ptrdiff_t>(GetBuffer().GetCapacity()));
  }
  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
    return GetNextCheckPoint();
  }

  // Emit the literal pool (if non-empty), then refresh the cached
  // checkpoints since emission moves them.
  void EmitLiteralPool(LiteralPool::EmitOption option) {
    if (!literal_pool_.IsEmpty()) literal_pool_.Emit(option);

    checkpoint_ = GetNextCheckPoint();
    recommended_checkpoint_ = literal_pool_.GetNextRecommendedCheckpoint();
  }
2932
  void CheckEmitFor(size_t amount);
  // Fast-path guard around CheckEmitFor: only perform the (expensive) check
  // when the cursor has passed the recommended checkpoint, or when emitting
  // `amount` bytes plus a worst-case pool emission could cross the hard
  // checkpoint.
  void EnsureEmitFor(size_t amount) {
    ptrdiff_t offset = amount;
    ptrdiff_t max_pools_size =
        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
    ptrdiff_t cursor = GetCursorOffset();
    if ((cursor >= recommended_checkpoint_) ||
        ((cursor + offset + max_pools_size) >= checkpoint_)) {
      CheckEmitFor(amount);
    }
  }
2944
  void CheckEmitPoolsFor(size_t amount);
  // Same fast-path logic as EnsureEmitFor, but only considers pool
  // maintenance (no buffer growth).
  virtual void EnsureEmitPoolsFor(size_t amount) VIXL_OVERRIDE {
    ptrdiff_t offset = amount;
    ptrdiff_t max_pools_size =
        literal_pool_.GetMaxSize() + veneer_pool_.GetMaxSize();
    ptrdiff_t cursor = GetCursorOffset();
    if ((cursor >= recommended_checkpoint_) ||
        ((cursor + offset + max_pools_size) >= checkpoint_)) {
      CheckEmitPoolsFor(amount);
    }
  }
2956
  // Set the current stack pointer, but don't generate any code.
  // The register must not overlap the scratch register list, since the
  // MacroAssembler could otherwise clobber it.
  void SetStackPointer(const Register& stack_pointer) {
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(stack_pointer));
    sp_ = stack_pointer;
  }

  // Return the current stack pointer, as set by SetStackPointer.
  const Register& StackPointer() const { return sp_; }

  // Scratch register pools (integer and FP), with deprecated aliases.
  CPURegList* GetScratchRegisterList() { return &tmp_list_; }
  VIXL_DEPRECATED("GetScratchRegisterList", CPURegList* TmpList()) {
    return GetScratchRegisterList();
  }

  CPURegList* GetScratchFPRegisterList() { return &fptmp_list_; }
  VIXL_DEPRECATED("GetScratchFPRegisterList", CPURegList* FPTmpList()) {
    return GetScratchFPRegisterList();
  }

  // Get or set the current (most-deeply-nested) UseScratchRegisterScope.
  void SetCurrentScratchRegisterScope(UseScratchRegisterScope* scope) {
    current_scratch_scope_ = scope;
  }
  UseScratchRegisterScope* GetCurrentScratchRegisterScope() {
    return current_scratch_scope_;
  }
2983
  // Like printf, but print at run-time from generated code.
  //
  // The caller must ensure that arguments for floating-point placeholders
  // (such as %e, %f or %g) are VRegisters in format 1S or 1D, and that
  // arguments for integer placeholders are Registers.
  //
  // At the moment it is only possible to print the value of sp if it is the
  // current stack pointer. Otherwise, the MacroAssembler will automatically
  // update sp on every push (using BumpSystemStackPointer), so determining
  // its value is difficult.
  //
  // Format placeholders that refer to more than one argument, or to a
  // specific argument, are not supported. This includes formats like "%1$d"
  // or "%.*d".
  //
  // This function automatically preserves caller-saved registers so that
  // calling code can use Printf at any point without having to worry about
  // corruption. The preservation mechanism generates a lot of code. If this
  // is a problem, preserve the important registers manually and then call
  // PrintfNoPreserve. Callee-saved registers are not used by Printf, and are
  // implicitly preserved.
  void Printf(const char* format,
              CPURegister arg0 = NoCPUReg,
              CPURegister arg1 = NoCPUReg,
              CPURegister arg2 = NoCPUReg,
              CPURegister arg3 = NoCPUReg);

  // Like Printf, but don't preserve any caller-saved registers, not even
  // 'lr'.
  //
  // The return code from the system printf call will be returned in x0.
  void PrintfNoPreserve(const char* format,
                        const CPURegister& arg0 = NoCPUReg,
                        const CPURegister& arg1 = NoCPUReg,
                        const CPURegister& arg2 = NoCPUReg,
                        const CPURegister& arg3 = NoCPUReg);

  // Trace control when running the debug simulator.
  //
  // For example:
  //
  // __ Trace(LOG_REGS, TRACE_ENABLE);
  // Will add registers to the trace if it wasn't already the case.
  //
  // __ Trace(LOG_DISASM, TRACE_DISABLE);
  // Will stop logging disassembly. It has no effect if the disassembly
  // wasn't already being logged.
  void Trace(TraceParameters parameters, TraceCommand command);

  // Log the requested data independently of what is being traced.
  //
  // For example:
  //
  // __ Log(LOG_FLAGS)
  // Will output the flags.
  void Log(TraceParameters parameters);

  // Enable or disable instrumentation when an Instrument visitor is attached
  // to the simulator.
  void EnableInstrumentation();
  void DisableInstrumentation();

  // Add a marker to the instrumentation data produced by an Instrument
  // visitor. The name is a two character string that will be attached to the
  // marker in the output data.
  void AnnotateInstrumentation(const char* marker_name);
3048
  LiteralPool* GetLiteralPool() { return &literal_pool_; }

  // Support for simulated runtime calls.

  // `CallRuntime` requires variadic templating, that is only available from
  // C++11.
#if __cplusplus >= 201103L
#define VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
#endif  // #if __cplusplus >= 201103L

#ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
  // Shared implementation for CallRuntime and TailCallRuntime; defined out
  // of line.
  template <typename R, typename... P>
  void CallRuntimeHelper(R (*function)(P...), RuntimeCallType call_type);

  // Generate a (simulatable) call to the C++ function `function`.
  template <typename R, typename... P>
  void CallRuntime(R (*function)(P...)) {
    CallRuntimeHelper(function, kCallRuntime);
  }

  // As CallRuntime, but as a tail call (no return to the caller).
  template <typename R, typename... P>
  void TailCallRuntime(R (*function)(P...)) {
    CallRuntimeHelper(function, kTailCallRuntime);
  }
#endif  // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3073
 protected:
  // Block/release/query helpers for the two pools. Blocking prevents pool
  // emission; calls nest (see the scope classes below).
  void BlockLiteralPool() { literal_pool_.Block(); }
  void ReleaseLiteralPool() { literal_pool_.Release(); }
  bool IsLiteralPoolBlocked() const { return literal_pool_.IsBlocked(); }
  void BlockVeneerPool() { veneer_pool_.Block(); }
  void ReleaseVeneerPool() { veneer_pool_.Release(); }
  bool IsVeneerPoolBlocked() const { return veneer_pool_.IsBlocked(); }

  virtual void BlockPools() VIXL_OVERRIDE {
    BlockLiteralPool();
    BlockVeneerPool();
  }

  virtual void ReleasePools() VIXL_OVERRIDE {
    ReleaseLiteralPool();
    ReleaseVeneerPool();
  }

  // The scopes below need to able to block and release a particular pool.
  // TODO: Consider removing those scopes or move them to
  // code-generation-scopes-vixl.h.
  friend class BlockPoolsScope;
  friend class BlockLiteralPoolScope;
  friend class BlockVeneerPoolScope;

  virtual void SetAllowMacroInstructions(bool value) VIXL_OVERRIDE {
    allow_macro_instructions_ = value;
  }
3102
  // Helper used to query information about code generation and to generate
  // code for `csel`.
  // Here and for the related helpers below:
  // - Code is generated when `masm` is not `NULL`.
  // - On return and when set, `should_synthesise_left` and
  //   `should_synthesise_right` will indicate whether `left` and `right`
  //   should be synthesized in a temporary register.
  static void CselHelper(MacroAssembler* masm,
                         const Register& rd,
                         Operand left,
                         Operand right,
                         Condition cond,
                         bool* should_synthesise_left = NULL,
                         bool* should_synthesise_right = NULL);

  // The helper returns `true` if it can handle the specified arguments.
  // Also see comments for `CselHelper()`.
  static bool CselSubHelperTwoImmediates(MacroAssembler* masm,
                                         const Register& rd,
                                         int64_t left,
                                         int64_t right,
                                         Condition cond,
                                         bool* should_synthesise_left,
                                         bool* should_synthesise_right);

  // See comments for `CselHelper()`.
  static bool CselSubHelperTwoOrderedImmediates(MacroAssembler* masm,
                                                const Register& rd,
                                                int64_t left,
                                                int64_t right,
                                                Condition cond);

  // See comments for `CselHelper()`.
  static void CselSubHelperRightSmallImmediate(MacroAssembler* masm,
                                               UseScratchRegisterScope* temps,
                                               const Register& rd,
                                               const Operand& left,
                                               const Operand& right,
                                               Condition cond,
                                               bool* should_synthesise_left);
3143
 private:
  // The actual Push and Pop implementations. These don't generate any code
  // other than that required for the push or pop. This allows
  // (Push|Pop)CPURegList to bundle together setup code for a large block of
  // registers.
  //
  // Note that size is per register, and is specified in bytes.
  void PushHelper(int count,
                  int size,
                  const CPURegister& src0,
                  const CPURegister& src1,
                  const CPURegister& src2,
                  const CPURegister& src3);
  void PopHelper(int count,
                 int size,
                 const CPURegister& dst0,
                 const CPURegister& dst1,
                 const CPURegister& dst2,
                 const CPURegister& dst3);

  // Immediate-synthesis helpers used by Movi, split by element width.
  void Movi16bitHelper(const VRegister& vd, uint64_t imm);
  void Movi32bitHelper(const VRegister& vd, uint64_t imm);
  void Movi64bitHelper(const VRegister& vd, uint64_t imm);

  // Perform necessary maintenance operations before a push or pop.
  //
  // Note that size is per register, and is specified in bytes.
  void PrepareForPush(int count, int size);
  void PrepareForPop(int count, int size);

  // The actual implementation of load and store operations for CPURegList.
  enum LoadStoreCPURegListAction { kLoad, kStore };
  void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
                                 CPURegList registers,
                                 const MemOperand& mem);
  // Returns a MemOperand suitable for loading or storing a CPURegList at
  // `dst`. This helper may allocate registers from `scratch_scope` and
  // generate code to compute an intermediate address. The resulting
  // MemOperand is only valid as long as `scratch_scope` remains valid.
  MemOperand BaseMemOperandForLoadStoreCPURegList(
      const CPURegList& registers,
      const MemOperand& mem,
      UseScratchRegisterScope* scratch_scope);
3187
LabelIsOutOfRange(Label * label,ImmBranchType branch_type)3188 bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
3189 return !Instruction::IsValidImmPCOffset(branch_type,
3190 label->GetLocation() -
3191 GetCursorOffset());
3192 }
3193
  // Tell whether any of the macro instruction can be used. When false the
  // MacroAssembler will assert if a method which can emit a variable number
  // of instructions is called.
  bool allow_macro_instructions_;

  // Indicates whether we should generate simulator or native code.
  bool generate_simulator_code_;

  // The register to use as a stack pointer for stack operations.
  Register sp_;

  // Scratch registers available for use by the MacroAssembler.
  CPURegList tmp_list_;
  CPURegList fptmp_list_;

  // The most-deeply-nested scratch register scope, if any (see
  // UseScratchRegisterScope below).
  UseScratchRegisterScope* current_scratch_scope_;

  LiteralPool literal_pool_;
  VeneerPool veneer_pool_;

  // Hard and recommended offsets at which pool/buffer maintenance must be
  // considered; kept in sync by EmitLiteralPool and the pools themselves.
  ptrdiff_t checkpoint_;
  ptrdiff_t recommended_checkpoint_;

  friend class Pool;
  friend class LiteralPool;
};
3220
3221
// From the veneer pool's point of view, the "other" pool is the literal
// pool; report its maximum size.
inline size_t VeneerPool::GetOtherPoolsMaxSize() const {
  return masm_->GetLiteralPoolMaxSize();
}
3225
3226
// From the literal pool's point of view, the "other" pool is the veneer
// pool; report its maximum size.
inline size_t LiteralPool::GetOtherPoolsMaxSize() const {
  return masm_->GetVeneerPoolMaxSize();
}
3230
3231
SetNextRecommendedCheckpoint(ptrdiff_t offset)3232 inline void LiteralPool::SetNextRecommendedCheckpoint(ptrdiff_t offset) {
3233 masm_->recommended_checkpoint_ =
3234 std::min(masm_->recommended_checkpoint_, offset);
3235 recommended_checkpoint_ = offset;
3236 }
3237
// Deprecated alias for ExactAssemblyScope, kept for source compatibility.
// `count` is a number of instructions, converted here to a byte size.
class InstructionAccurateScope : public ExactAssemblyScope {
 public:
  VIXL_DEPRECATED("ExactAssemblyScope",
                  InstructionAccurateScope(MacroAssembler* masm,
                                           int64_t count,
                                           SizePolicy size_policy = kExactSize))
      : ExactAssemblyScope(masm, count * kInstructionSize, size_policy) {}
};
3246
// RAII scope that blocks literal pool emission for its lifetime.
class BlockLiteralPoolScope {
 public:
  explicit BlockLiteralPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockLiteralPool();
  }

  ~BlockLiteralPoolScope() { masm_->ReleaseLiteralPool(); }

 private:
  MacroAssembler* masm_;
};
3258
3259
// RAII scope that blocks veneer pool emission for its lifetime.
class BlockVeneerPoolScope {
 public:
  explicit BlockVeneerPoolScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockVeneerPool();
  }

  ~BlockVeneerPoolScope() { masm_->ReleaseVeneerPool(); }

 private:
  MacroAssembler* masm_;
};
3271
3272
// RAII scope that blocks both the literal and veneer pools for its
// lifetime.
class BlockPoolsScope {
 public:
  explicit BlockPoolsScope(MacroAssembler* masm) : masm_(masm) {
    masm_->BlockPools();
  }

  ~BlockPoolsScope() { masm_->ReleasePools(); }

 private:
  MacroAssembler* masm_;
};
3284
3285
3286 // This scope utility allows scratch registers to be managed safely. The
3287 // MacroAssembler's GetScratchRegisterList() (and GetScratchFPRegisterList()) is
3288 // used as a pool of scratch registers. These registers can be allocated on
3289 // demand, and will be returned at the end of the scope.
3290 //
3291 // When the scope ends, the MacroAssembler's lists will be restored to their
3292 // original state, even if the lists were modified by some other means.
class UseScratchRegisterScope {
 public:
  // This constructor implicitly calls `Open` to initialise the scope (`masm`
  // must not be `NULL`), so it is ready to use immediately after it has been
  // constructed.
  explicit UseScratchRegisterScope(MacroAssembler* masm)
      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {
    Open(masm);
  }
  // This constructor does not implicitly initialise the scope. Instead, the
  // user is required to explicitly call the `Open` function before using the
  // scope.
  UseScratchRegisterScope()
      : masm_(NULL), parent_(NULL), old_available_(0), old_availablefp_(0) {}

  // This function performs the actual initialisation work.
  void Open(MacroAssembler* masm);

  // The destructor always implicitly calls the `Close` function.
  ~UseScratchRegisterScope() { Close(); }

  // This function performs the cleaning-up work. It must succeed even if the
  // scope has not been opened. It is safe to call multiple times.
  void Close();


  // Returns true if `reg` is currently in one of the scratch lists this scope
  // manages (and so could be handed out by an Acquire* call).
  bool IsAvailable(const CPURegister& reg) const;


  // Take a register from the appropriate temps list. It will be returned
  // automatically when the scope ends.
  Register AcquireW() {
    return AcquireNextAvailable(masm_->GetScratchRegisterList()).W();
  }
  Register AcquireX() {
    return AcquireNextAvailable(masm_->GetScratchRegisterList()).X();
  }
  VRegister AcquireS() {
    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).S();
  }
  VRegister AcquireD() {
    return AcquireNextAvailable(masm_->GetScratchFPRegisterList()).D();
  }


  // Acquire a core register of the given width (in bits).
  Register AcquireRegisterOfSize(int size_in_bits);
  Register AcquireSameSizeAs(const Register& reg) {
    return AcquireRegisterOfSize(reg.GetSizeInBits());
  }
  // Acquire an FP/SIMD register of the given width (in bits).
  VRegister AcquireVRegisterOfSize(int size_in_bits);
  VRegister AcquireSameSizeAs(const VRegister& reg) {
    return AcquireVRegisterOfSize(reg.GetSizeInBits());
  }
  // Acquire some register of the given width, preferring a core register and
  // falling back to a V register when the core scratch list is empty.
  CPURegister AcquireCPURegisterOfSize(int size_in_bits) {
    return masm_->GetScratchRegisterList()->IsEmpty()
               ? CPURegister(AcquireVRegisterOfSize(size_in_bits))
               : CPURegister(AcquireRegisterOfSize(size_in_bits));
  }


  // Explicitly release an acquired (or excluded) register, putting it back in
  // the appropriate temps list.
  void Release(const CPURegister& reg);


  // Make the specified registers available as scratch registers for the
  // duration of this scope.
  void Include(const CPURegList& list);
  void Include(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Include(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);


  // Make sure that the specified registers are not available in this scope.
  // This can be used to prevent helper functions from using sensitive
  // registers, for example.
  void Exclude(const CPURegList& list);
  void Exclude(const Register& reg1,
               const Register& reg2 = NoReg,
               const Register& reg3 = NoReg,
               const Register& reg4 = NoReg);
  void Exclude(const VRegister& reg1,
               const VRegister& reg2 = NoVReg,
               const VRegister& reg3 = NoVReg,
               const VRegister& reg4 = NoVReg);
  void Exclude(const CPURegister& reg1,
               const CPURegister& reg2 = NoCPUReg,
               const CPURegister& reg3 = NoCPUReg,
               const CPURegister& reg4 = NoCPUReg);


  // Prevent any scratch registers from being used in this scope.
  void ExcludeAll();

 private:
  // Pop the next register from `available`, asserting availability elsewhere.
  static CPURegister AcquireNextAvailable(CPURegList* available);

  static void ReleaseByCode(CPURegList* available, int code);

  static void ReleaseByRegList(CPURegList* available, RegList regs);

  static void IncludeByRegList(CPURegList* available, RegList exclude);

  static void ExcludeByRegList(CPURegList* available, RegList exclude);

  // The MacroAssembler maintains a list of available scratch registers, and
  // also keeps track of the most recently-opened scope so that on destruction
  // we can check that scopes do not outlive their parents.
  MacroAssembler* masm_;
  UseScratchRegisterScope* parent_;

  // The state of the available lists at the start of this scope.
  // `Close` restores these snapshots wholesale, so any list modification made
  // while the scope is open is undone, however it was made.
  RegList old_available_;    // kRegister
  RegList old_availablefp_;  // kVRegister

  // Disallow copy constructor and operator=.
  VIXL_DEBUG_NO_RETURN UseScratchRegisterScope(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
  VIXL_DEBUG_NO_RETURN void operator=(const UseScratchRegisterScope&) {
    VIXL_UNREACHABLE();
  }
};
3421
3422 // Variadic templating is only available from C++11.
3423 #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3424
3425 // `R` stands for 'return type', and `P` for 'parameter types'.
template <typename R, typename... P>
void MacroAssembler::CallRuntimeHelper(R (*function)(P...),
                                       RuntimeCallType call_type) {
  if (generate_simulator_code_) {
#ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
    // For simulated code, emit a HLT marker followed by three literal fields:
    // the address of a templated wrapper (which knows how to marshal `P...`
    // arguments and the `R` result for the simulator), the address of the
    // target function, and the call type.
    uintptr_t runtime_call_wrapper_address = reinterpret_cast<uintptr_t>(
        &(Simulator::RuntimeCallStructHelper<R, P...>::Wrapper));
    uintptr_t function_address = reinterpret_cast<uintptr_t>(function);

    // The whole sequence must be exactly kRuntimeCallLength bytes.
    EmissionCheckScope guard(this,
                             kRuntimeCallLength,
                             CodeBufferCheckScope::kExactSize);
    Label start;
    bind(&start);
    {
      ExactAssemblyScope scope(this, kInstructionSize);
      hlt(kRuntimeCallOpcode);
    }
    // Each field must land at its fixed offset from `start`; the asserts
    // check the layout as it is emitted.
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallWrapperOffset);
    dc(runtime_call_wrapper_address);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) ==
                kRuntimeCallFunctionOffset);
    dc(function_address);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallTypeOffset);
    dc32(call_type);
    VIXL_ASSERT(GetSizeOfCodeGeneratedSince(&start) == kRuntimeCallLength);
#else
    VIXL_UNREACHABLE();
#endif  // #ifdef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
  } else {
    // For native code, materialise the function address in a scratch register
    // and branch to it: BR for a tail call (no link), BLR otherwise.
    UseScratchRegisterScope temps(this);
    Register temp = temps.AcquireX();
    Mov(temp, reinterpret_cast<uint64_t>(function));
    if (call_type == kTailCallRuntime) {
      Br(temp);
    } else {
      VIXL_ASSERT(call_type == kCallRuntime);
      Blr(temp);
    }
  }
}
3468
3469 #endif // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
3470
3471 } // namespace aarch64
3472
3473 // Required InvalSet template specialisations.
3474 // TODO: These template specialisations should not live in this file. Move
3475 // VeneerPool out of the aarch64 namespace in order to share its implementation
3476 // later.
// Keys branches in the veneer pool's InvalSet by the first PC from which the
// branch can no longer reach its target (so the set can be queried for
// branches that need a veneer before a given offset).
template <>
inline ptrdiff_t InvalSet<aarch64::VeneerPool::BranchInfo,
                          aarch64::VeneerPool::kNPreallocatedInfos,
                          ptrdiff_t,
                          aarch64::VeneerPool::kInvalidOffset,
                          aarch64::VeneerPool::kReclaimFrom,
                          aarch64::VeneerPool::kReclaimFactor>::
    GetKey(const aarch64::VeneerPool::BranchInfo& branch_info) {
  return branch_info.first_unreacheable_pc_;
}
// Mirror of GetKey above: stores a new key back into the BranchInfo element.
template <>
inline void InvalSet<aarch64::VeneerPool::BranchInfo,
                     aarch64::VeneerPool::kNPreallocatedInfos,
                     ptrdiff_t,
                     aarch64::VeneerPool::kInvalidOffset,
                     aarch64::VeneerPool::kReclaimFrom,
                     aarch64::VeneerPool::kReclaimFactor>::
    SetKey(aarch64::VeneerPool::BranchInfo* branch_info, ptrdiff_t key) {
  branch_info->first_unreacheable_pc_ = key;
}
3497
3498 } // namespace vixl
3499
3500 #endif // VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
3501