1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
15 //
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 #ifndef VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
28 #define VIXL_AARCH64_MACRO_ASSEMBLER_AARCH64_H_
29
30 #include <algorithm>
31 #include <limits>
32
33 #include "../code-generation-scopes-vixl.h"
34 #include "../globals-vixl.h"
35 #include "../macro-assembler-interface.h"
36
37 #include "assembler-aarch64.h"
38 // Required for runtime call support.
39 // TODO: Break this dependency. We should be able to separate out the necessary
40 // parts so that we don't need to include the whole simulator header.
41 #include "simulator-aarch64.h"
42 // Required in order to generate debugging instructions for the simulator. This
43 // is needed regardless of whether the simulator is included or not, since
44 // generating simulator specific instructions is controlled at runtime.
45 #include "simulator-constants-aarch64.h"
46
47
// Table of single-register load/store macro-instructions. Each entry is
// V(MacroName, register type, register parameter name, assembler LoadStoreOp).
// The list is expanded with a user-supplied V to declare or define one
// macro-instruction per entry (see DECLARE_FUNCTION below).
#define LS_MACRO_LIST(V)                                     \
  V(Ldrb, Register&, rt, LDRB_w)                             \
  V(Strb, Register&, rt, STRB_w)                             \
  V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
  V(Ldrh, Register&, rt, LDRH_w)                             \
  V(Strh, Register&, rt, STRH_w)                             \
  V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
  V(Ldr, CPURegister&, rt, LoadOpFor(rt))                    \
  V(Str, CPURegister&, rt, StoreOpFor(rt))                   \
  V(Ldrsw, Register&, rt, LDRSW_x)
58
59
// Table of load/store-pair macro-instructions. Each entry is
// V(MacroName, register type, first register name, second register name,
// assembler LoadStorePairOp). Expanded like LS_MACRO_LIST.
#define LSPAIR_MACRO_LIST(V)                             \
  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
  V(Ldpsw, Register&, rt, rt2, LDPSW_x)
64
65 namespace vixl {
66 namespace aarch64 {
67
68 // Forward declaration
69 class MacroAssembler;
70 class UseScratchRegisterScope;
71
// Base class for the literal and veneer pools. It tracks a buffer-offset
// checkpoint at which the owning MacroAssembler must check the pool, and a
// nesting counter that blocks pool emission while non-zero.
class Pool {
 public:
  explicit Pool(MacroAssembler* masm)
      : checkpoint_(kNoCheckpointRequired), masm_(masm) {
    Reset();
  }

  // Return the pool to its initial state: no checkpoint required, not blocked.
  void Reset() {
    checkpoint_ = kNoCheckpointRequired;
    monitor_ = 0;
  }

  // Block() and Release() nest; the pool may only be emitted once every
  // Block() has been matched by a Release().
  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }

  // Sentinel checkpoint value meaning "no check is needed".
  static const ptrdiff_t kNoCheckpointRequired = PTRDIFF_MAX;

  void SetNextCheckpoint(ptrdiff_t checkpoint);
  ptrdiff_t GetCheckpoint() const { return checkpoint_; }
  VIXL_DEPRECATED("GetCheckpoint", ptrdiff_t checkpoint() const) {
    return GetCheckpoint();
  }

  // Whether emitting the pool requires a branch to jump over it (needed when
  // the pool is emitted in the middle of reachable code).
  enum EmitOption { kBranchRequired, kNoBranchRequired };

 protected:
  // Next buffer offset at which a check is required for this pool.
  ptrdiff_t checkpoint_;
  // Indicates whether the emission of this pool is blocked.
  int monitor_;
  // The MacroAssembler using this pool.
  MacroAssembler* masm_;
};
106
107
// Pool of literal values. RawLiteral entries are accumulated as instructions
// reference them, and the whole pool is later emitted into the instruction
// stream (preceded by a pool header, and optionally a branch over the pool).
class LiteralPool : public Pool {
 public:
#ifndef PANDA_BUILD
  explicit LiteralPool(MacroAssembler* masm);
#else
  explicit LiteralPool(MacroAssembler* masm) = delete;
  LiteralPool(AllocatorWrapper allocator, MacroAssembler* masm);
#endif
  ~LiteralPool() VIXL_NEGATIVE_TESTING_ALLOW_EXCEPTION;
  void Reset();

  // Add a literal to the pool.
  void AddEntry(RawLiteral* literal);
  bool IsEmpty() const { return entries_.empty(); }
  // Size of the pool including its header, in bytes (see inline definition
  // below).
  size_t GetSize() const;
  VIXL_DEPRECATED("GetSize", size_t Size() const) { return GetSize(); }

  // Worst-case emitted size: the pool plus a potential branch over it.
  size_t GetMaxSize() const;
  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }

  // Worst-case size of the other pools managed by the same MacroAssembler.
  size_t GetOtherPoolsMaxSize() const;
  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
    return GetOtherPoolsMaxSize();
  }

  // Emit the pool if generating `amount` more bytes would overrun the
  // checkpoint.
  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  // Check whether we need to emit the literal pool in order to be able to
  // safely emit a branch with a given range.
  void CheckEmitForBranch(size_t range);
  void Emit(EmitOption option = kNoBranchRequired);

  void SetNextRecommendedCheckpoint(ptrdiff_t offset);
  ptrdiff_t GetNextRecommendedCheckpoint();
  VIXL_DEPRECATED("GetNextRecommendedCheckpoint",
                  ptrdiff_t NextRecommendedCheckpoint()) {
    return GetNextRecommendedCheckpoint();
  }

  // Record the buffer offset of a literal use, tracking the oldest use still
  // pending in the pool.
  void UpdateFirstUse(ptrdiff_t use_position);

  // Hand ownership of `literal` to the pool; it is deleted when the pool is
  // destroyed.
  void DeleteOnDestruction(RawLiteral* literal) {
    deleted_on_destruction_.push_back(literal);
  }

  // Recommended not exact since the pool can be blocked for short periods.
  static const ptrdiff_t kRecommendedLiteralPoolRange = 128 * KBytes;

 private:
#ifndef PANDA_BUILD
  std::vector<RawLiteral*> entries_;
#else
  Vector<RawLiteral*> entries_;
#endif
  // Accumulated size of the entries, in bytes, excluding the pool header.
  size_t size_;
  // Buffer offset of the oldest pending literal use.
  ptrdiff_t first_use_;
  // The parent class `Pool` provides a `checkpoint_`, which is the buffer
  // offset before which a check *must* occur. This recommended checkpoint
  // indicates when we would like to start emitting the constant pool. The
  // MacroAssembler can, but does not have to, check the buffer when the
  // checkpoint is reached.
  ptrdiff_t recommended_checkpoint_;

#ifndef PANDA_BUILD
  std::vector<RawLiteral*> deleted_on_destruction_;
#else
  Vector<RawLiteral*> deleted_on_destruction_;
  AllocatorWrapper allocator_;
#endif
};
176
177
GetSize()178 inline size_t LiteralPool::GetSize() const {
179 // Account for the pool header.
180 return size_ + kInstructionSize;
181 }
182
183
GetMaxSize()184 inline size_t LiteralPool::GetMaxSize() const {
185 // Account for the potential branch over the pool.
186 return GetSize() + kInstructionSize;
187 }
188
189
GetNextRecommendedCheckpoint()190 inline ptrdiff_t LiteralPool::GetNextRecommendedCheckpoint() {
191 return first_use_ + kRecommendedLiteralPoolRange;
192 }
193
194
// Pool of veneers: single-branch stubs emitted so that short-range branches
// (CondBranchType, CompareBranchType and TestBranchType) can reach labels
// beyond their immediate range. The pool tracks unresolved forward branches
// and must be emitted before any tracked branch goes out of range.
class VeneerPool : public Pool {
 public:
#ifndef PANDA_BUILD
  explicit VeneerPool(MacroAssembler* masm) : Pool(masm) {}
#else
  explicit VeneerPool(MacroAssembler* masm) = delete;
  VeneerPool(AllocatorWrapper allocator, MacroAssembler* masm) : Pool(masm), unresolved_branches_(allocator), allocator_(allocator) {}
#endif
  void Reset();

  // Blocking behaves as in the base class; these members shadow (and mirror)
  // the non-virtual Pool::Block/Release/IsBlocked.
  void Block() { monitor_++; }
  void Release();
  bool IsBlocked() const { return monitor_ != 0; }
  bool IsEmpty() const { return unresolved_branches_.IsEmpty(); }

  // Bookkeeping for a single unresolved forward branch.
  class BranchInfo {
   public:
    BranchInfo()
        : first_unreacheable_pc_(0),
          pc_offset_(0),
          label_(NULL),
          branch_type_(UnknownBranchType) {}
    BranchInfo(ptrdiff_t offset, Label* label, ImmBranchType branch_type)
        : pc_offset_(offset), label_(label), branch_type_(branch_type) {
      // The limit is the branch position plus the forward range of its
      // immediate offset field.
      first_unreacheable_pc_ =
          pc_offset_ + Instruction::GetImmBranchForwardRange(branch_type_);
    }

    static bool IsValidComparison(const BranchInfo& branch_1,
                                  const BranchInfo& branch_2) {
      // BranchInfo are always compared against other objects with
      // the same branch type.
      if (branch_1.branch_type_ != branch_2.branch_type_) {
        return false;
      }
      // Since we should never have two branch infos with the same offsets, it
      // first looks like we should check that offsets are different. However
      // the operators may also be used to *search* for a branch info in the
      // set.
      bool same_offsets = (branch_1.pc_offset_ == branch_2.pc_offset_);
      return (!same_offsets || ((branch_1.label_ == branch_2.label_) &&
                                (branch_1.first_unreacheable_pc_ ==
                                 branch_2.first_unreacheable_pc_)));
    }

    // We must provide comparison operators to work with InvalSet.
    bool operator==(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ == other.pc_offset_;
    }
    bool operator<(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ < other.pc_offset_;
    }
    bool operator<=(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ <= other.pc_offset_;
    }
    bool operator>(const BranchInfo& other) const {
      VIXL_ASSERT(IsValidComparison(*this, other));
      return pc_offset_ > other.pc_offset_;
    }

    // First instruction position that is not reachable by the branch using a
    // positive branch offset.
    ptrdiff_t first_unreacheable_pc_;
    // Offset of the branch in the code generation buffer.
    ptrdiff_t pc_offset_;
    // The label branched to.
    Label* label_;
    ImmBranchType branch_type_;
  };

  // Veneers are used for all branch types except unknown and unconditional
  // branches.
  bool BranchTypeUsesVeneers(ImmBranchType type) {
    return (type != UnknownBranchType) && (type != UncondBranchType);
  }

  // Start tracking a forward branch to a not-yet-bound label.
  void RegisterUnresolvedBranch(ptrdiff_t branch_pos,
                                Label* label,
                                ImmBranchType branch_type);
  // Drop all tracking for branches targeting `label`.
  void DeleteUnresolvedBranchInfoForLabel(Label* label);

  // Whether generating `amount` more bytes would take the branch with the
  // given limit (or, for the plural form, the most constrained tracked
  // branch) out of range.
  bool ShouldEmitVeneer(int64_t first_unreacheable_pc, size_t amount);
  bool ShouldEmitVeneers(size_t amount) {
    return ShouldEmitVeneer(unresolved_branches_.GetFirstLimit(), amount);
  }

  void CheckEmitFor(size_t amount, EmitOption option = kBranchRequired);
  void Emit(EmitOption option, size_t margin);

  // The code size generated for a veneer. Currently one branch instruction.
  // This is for code size checking purposes, and can be extended in the future
  // for example if we decide to add nops between the veneers.
  static const int kVeneerCodeSize = 1 * kInstructionSize;
  // The maximum size of code other than veneers that can be generated when
  // emitting a veneer pool. Currently there can be an additional branch to jump
  // over the pool.
  static const int kPoolNonVeneerCodeSize = 1 * kInstructionSize;

  void UpdateNextCheckPoint() { SetNextCheckpoint(GetNextCheckPoint()); }

  int GetNumberOfPotentialVeneers() const {
    return static_cast<int>(unresolved_branches_.GetSize());
  }
  VIXL_DEPRECATED("GetNumberOfPotentialVeneers",
                  int NumberOfPotentialVeneers() const) {
    return GetNumberOfPotentialVeneers();
  }

  // Worst-case pool size: one veneer per tracked branch, plus the branch over
  // the pool.
  size_t GetMaxSize() const {
    return kPoolNonVeneerCodeSize +
           unresolved_branches_.GetSize() * kVeneerCodeSize;
  }
  VIXL_DEPRECATED("GetMaxSize", size_t MaxSize() const) { return GetMaxSize(); }

  size_t GetOtherPoolsMaxSize() const;
  VIXL_DEPRECATED("GetOtherPoolsMaxSize", size_t OtherPoolsMaxSize() const) {
    return GetOtherPoolsMaxSize();
  }

  // InvalSet tuning parameters for the per-branch-type sets below.
  static const int kNPreallocatedInfos = 4;
  static const ptrdiff_t kInvalidOffset = PTRDIFF_MAX;
  static const size_t kReclaimFrom = 128;
  static const size_t kReclaimFactor = 16;

 private:
  typedef InvalSet<BranchInfo,
                   kNPreallocatedInfos,
                   ptrdiff_t,
                   kInvalidOffset,
                   kReclaimFrom,
                   kReclaimFactor>
      BranchInfoTypedSetBase;
  typedef InvalSetIterator<BranchInfoTypedSetBase> BranchInfoTypedSetIterBase;

  // Set of BranchInfo for a single branch type.
  class BranchInfoTypedSet : public BranchInfoTypedSetBase {
   public:
#ifndef PANDA_BUILD
    BranchInfoTypedSet() : BranchInfoTypedSetBase() {}
#else
    BranchInfoTypedSet() = delete;
    explicit BranchInfoTypedSet(AllocatorWrapper alloc) : BranchInfoTypedSetBase(alloc) {}
    BranchInfoTypedSet(BranchInfoTypedSet&&) = default;
#endif
    // Smallest key in the set, i.e. the tightest "first unreachable pc"
    // limit, or kInvalidOffset when the set is empty.
    ptrdiff_t GetFirstLimit() {
      if (empty()) {
        return kInvalidOffset;
      }
      return GetMinElementKey();
    }
    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
      return GetFirstLimit();
    }
  };

  class BranchInfoTypedSetIterator : public BranchInfoTypedSetIterBase {
   public:
    BranchInfoTypedSetIterator() : BranchInfoTypedSetIterBase(NULL) {}
    explicit BranchInfoTypedSetIterator(BranchInfoTypedSet* typed_set)
        : BranchInfoTypedSetIterBase(typed_set) {}

    // TODO: Remove these and use the STL-like interface instead.
    using BranchInfoTypedSetIterBase::Advance;
    using BranchInfoTypedSetIterBase::Current;
  };

  // Aggregates one BranchInfoTypedSet per tracked branch type (conditional,
  // compare and test branches).
  class BranchInfoSet {
   public:
#ifdef PANDA_BUILD
    BranchInfoSet() = delete;
    BranchInfoSet(AllocatorWrapper allocator) :
        typed_set_(allocator.Adapter()) {
      typed_set_.reserve(3);
      typed_set_.emplace_back((allocator));
      typed_set_.emplace_back((allocator));
      typed_set_.emplace_back((allocator));
    };
#endif
    void insert(BranchInfo branch_info) {
      ImmBranchType type = branch_info.branch_type_;
      VIXL_ASSERT(IsValidBranchType(type));
      typed_set_[BranchIndexFromType(type)].insert(branch_info);
    }

    void erase(BranchInfo branch_info) {
      if (IsValidBranchType(branch_info.branch_type_)) {
        int index =
            BranchInfoSet::BranchIndexFromType(branch_info.branch_type_);
        typed_set_[index].erase(branch_info);
      }
    }

    // Total number of tracked branches, across all branch types.
    size_t GetSize() const {
      size_t res = 0;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res += typed_set_[i].size();
      }
      return res;
    }
    VIXL_DEPRECATED("GetSize", size_t size() const) { return GetSize(); }

    bool IsEmpty() const {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        if (!typed_set_[i].empty()) {
          return false;
        }
      }
      return true;
    }
    VIXL_DEPRECATED("IsEmpty", bool empty() const) { return IsEmpty(); }

    // Tightest limit over all branch types, or kInvalidOffset if empty.
    ptrdiff_t GetFirstLimit() {
      ptrdiff_t res = kInvalidOffset;
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        res = std::min(res, typed_set_[i].GetFirstLimit());
      }
      return res;
    }
    VIXL_DEPRECATED("GetFirstLimit", ptrdiff_t FirstLimit()) {
      return GetFirstLimit();
    }

    void Reset() {
      for (int i = 0; i < kNumberOfTrackedBranchTypes; i++) {
        typed_set_[i].clear();
      }
    }

    // Mapping between tracked branch types and indices into typed_set_.
    static ImmBranchType BranchTypeFromIndex(int index) {
      switch (index) {
        case 0:
          return CondBranchType;
        case 1:
          return CompareBranchType;
        case 2:
          return TestBranchType;
        default:
          VIXL_UNREACHABLE();
          return UnknownBranchType;
      }
    }
    static int BranchIndexFromType(ImmBranchType branch_type) {
      switch (branch_type) {
        case CondBranchType:
          return 0;
        case CompareBranchType:
          return 1;
        case TestBranchType:
          return 2;
        default:
          VIXL_UNREACHABLE();
          return 0;
      }
    }

    bool IsValidBranchType(ImmBranchType branch_type) {
      return (branch_type != UnknownBranchType) &&
             (branch_type != UncondBranchType);
    }

   private:
    static const int kNumberOfTrackedBranchTypes = 3;
#ifndef PANDA_BUILD
    BranchInfoTypedSet typed_set_[kNumberOfTrackedBranchTypes];
#else
    Vector<BranchInfoTypedSet> typed_set_;
#endif
    friend class VeneerPool;
    friend class BranchInfoSetIterator;
  };

  // Iterates over every BranchInfo of a BranchInfoSet, exhausting one branch
  // type before moving to the next.
  class BranchInfoSetIterator {
   public:
#ifndef PANDA_BUILD
    explicit BranchInfoSetIterator(BranchInfoSet* set) : set_(set) {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        new (&sub_iterator_[i])
            BranchInfoTypedSetIterator(&(set_->typed_set_[i]));
      }
    }
#else
    explicit BranchInfoSetIterator(BranchInfoSet* set) = delete;
    BranchInfoSetIterator(AllocatorWrapper allocator, BranchInfoSet* set) : set_(set), sub_iterator_(allocator.Adapter()) {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        sub_iterator_.emplace_back(&(set_->typed_set_[i]));
      }
    }

#endif

    // Current element, taken from the first sub-iterator that is not done.
    VeneerPool::BranchInfo* Current() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          return sub_iterator_[i].Current();
        }
      }
      VIXL_UNREACHABLE();
      return NULL;
    }

    void Advance() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Advance();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    bool Done() const {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) return false;
      }
      return true;
    }

    // Skip the remaining elements of the current branch type.
    void AdvanceToNextType() {
      VIXL_ASSERT(!Done());
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].Finish();
          return;
        }
      }
      VIXL_UNREACHABLE();
    }

    void DeleteCurrentAndAdvance() {
      for (int i = 0; i < BranchInfoSet::kNumberOfTrackedBranchTypes; i++) {
        if (!sub_iterator_[i].Done()) {
          sub_iterator_[i].DeleteCurrentAndAdvance();
          return;
        }
      }
    }

   private:
    BranchInfoSet* set_;
#ifndef PANDA_BUILD
    BranchInfoTypedSetIterator
        sub_iterator_[BranchInfoSet::kNumberOfTrackedBranchTypes];
#else
    Vector<BranchInfoTypedSetIterator> sub_iterator_;
#endif
  };

  // Buffer offset at which the next range check is required, or
  // kNoCheckpointRequired when no branches are tracked.
  ptrdiff_t GetNextCheckPoint() {
    if (unresolved_branches_.IsEmpty()) {
      return kNoCheckpointRequired;
    } else {
      return unresolved_branches_.GetFirstLimit();
    }
  }
  VIXL_DEPRECATED("GetNextCheckPoint", ptrdiff_t NextCheckPoint()) {
    return GetNextCheckPoint();
  }

  // Information about unresolved (forward) branches.
  BranchInfoSet unresolved_branches_;
#ifdef PANDA_BUILD
  AllocatorWrapper allocator_;
#endif
};
560
561
562 // Helper for common Emission checks.
563 // The macro-instruction maps to a single instruction.
class SingleEmissionCheckScope : public EmissionCheckScope {
 public:
  // Reserve space for exactly one instruction.
  explicit SingleEmissionCheckScope(MacroAssemblerInterface* masm)
      : EmissionCheckScope(masm, kInstructionSize) {}
};
569
570
// The macro instruction is a "typical" macro-instruction. A typical macro-
// instruction only emits a few instructions, "a few" being defined as 8 here.
class MacroEmissionCheckScope : public EmissionCheckScope {
 public:
  // Reserve space for a typical macro-instruction (up to
  // kTypicalMacroInstructionMaxSize bytes).
  explicit MacroEmissionCheckScope(MacroAssemblerInterface* masm)
      : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}

 private:
  // Upper bound on the expansion of a single macro-instruction.
  static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
};
581
582
583 // This scope simplifies the handling of the SVE `movprfx` instruction.
584 //
585 // If dst.Aliases(src):
586 // - Start an ExactAssemblyScope(masm, kInstructionSize).
587 // Otherwise:
588 // - Start an ExactAssemblyScope(masm, 2 * kInstructionSize).
589 // - Generate a suitable `movprfx` instruction.
590 //
591 // In both cases, the ExactAssemblyScope is left with enough remaining space for
592 // exactly one destructive instruction.
class MovprfxHelperScope : public ExactAssemblyScope {
 public:
  // Unpredicated form: `movprfx dst, src` is emitted when dst does not alias
  // src.
  inline MovprfxHelperScope(MacroAssembler* masm,
                            const ZRegister& dst,
                            const ZRegister& src);

  // Predicated form: `movprfx` is emitted when dst does not alias src, or
  // when pg is zeroing.
  inline MovprfxHelperScope(MacroAssembler* masm,
                            const ZRegister& dst,
                            const PRegister& pg,
                            const ZRegister& src);

  // TODO: Implement constructors that examine _all_ sources. If `dst` aliases
  // any other source register, we can't use `movprfx`. This isn't obviously
  // useful, but the MacroAssembler should not generate invalid code for it.
  // Valid behaviour can be implemented using `mov`.
  //
  // The best way to handle this in an instruction-agnostic way is probably to
  // use variadic templates.

 private:
  // A `movprfx` is only needed when dst and src are distinct registers.
  inline bool ShouldGenerateMovprfx(const ZRegister& dst,
                                    const ZRegister& src) {
    VIXL_ASSERT(AreSameLaneSize(dst, src));
    return !dst.Aliases(src);
  }

  inline bool ShouldGenerateMovprfx(const ZRegister& dst,
                                    const PRegister& pg,
                                    const ZRegister& src) {
    VIXL_ASSERT(pg.IsMerging() || pg.IsZeroing());
    // We need to emit movprfx in two cases:
    // 1. To give a predicated merging unary instruction zeroing predication.
    // 2. To make destructive instructions constructive.
    //
    // There are no predicated zeroing instructions that can take movprfx, so we
    // will never generate an unnecessary movprfx with this logic.
    return pg.IsZeroing() || ShouldGenerateMovprfx(dst, src);
  }
};
632
633
// The kinds of branch the MacroAssembler can synthesize, covering both
// condition-code branches and register-based branches (cbz/cbnz, tbz/tbnz).
enum BranchType {
  // Copies of architectural conditions.
  // The associated conditions can be used in place of those, the code will
  // take care of reinterpreting them with the correct type.
  integer_eq = eq,
  integer_ne = ne,
  integer_hs = hs,
  integer_lo = lo,
  integer_mi = mi,
  integer_pl = pl,
  integer_vs = vs,
  integer_vc = vc,
  integer_hi = hi,
  integer_ls = ls,
  integer_ge = ge,
  integer_lt = lt,
  integer_gt = gt,
  integer_le = le,
  integer_al = al,
  integer_nv = nv,

  // These two are *different* from the architectural codes al and nv.
  // 'always' is used to generate unconditional branches.
  // 'never' is used to not generate a branch (generally as the inverse
  // branch type of 'always').
  always,
  never,
  // cbz and cbnz
  reg_zero,
  reg_not_zero,
  // tbz and tbnz
  reg_bit_clear,
  reg_bit_set,

  // Aliases.
  kBranchTypeFirstCondition = eq,
  kBranchTypeLastCondition = nv,
  kBranchTypeFirstUsingReg = reg_zero,
  kBranchTypeFirstUsingBit = reg_bit_clear,

  // SVE branch conditions (aliases of the architectural condition codes).
  integer_none = eq,
  integer_any = ne,
  integer_nlast = cs,
  integer_last = cc,
  integer_first = mi,
  integer_nfrst = pl,
  integer_pmore = hi,
  integer_plast = ls,
  integer_tcont = ge,
  integer_tstop = lt
};
686
687
// Controls whether a Mov macro may be discarded when the source and
// destination are the same W register; see
// Mov(const Register&, const Operand&, DiscardMoveMode).
enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
689
// The macro assembler supports moving automatically pre-shifted immediates for
// arithmetic and logical instructions, and then applying a post shift in the
// instruction to undo the modification, in order to reduce the code emitted for
// an operation. For example:
//
//  Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
//
// This optimisation can be only partially applied when the stack pointer is an
// operand or destination, so this enumeration is used to control the shift.
enum PreShiftImmMode {
  kNoShift,          // Don't pre-shift.
  kLimitShiftForSP,  // Limit pre-shift for add/sub extend use.
  kAnyShift          // Allow any pre-shift.
};
704
// How floating-point macros (e.g. Fmin, Fmax) should propagate NaN inputs.
enum FPMacroNaNPropagationOption {
  // The default option. This generates a run-time error in macros that respect
  // this option.
  NoFPMacroNaNPropagationSelected,
  // For example, Fmin(result, NaN(a), NaN(b)) always selects NaN(a) if NaN(a)
  // and NaN(b) are both quiet, or both are signalling, at the cost of extra
  // code generation in some cases.
  StrictNaNPropagation,
  // For example, Fmin(result, NaN(a), NaN(b)) selects either NaN, but using the
  // fewest instructions.
  FastNaNPropagation
};
717
718 class MacroAssembler : public Assembler, public MacroAssemblerInterface {
719 public:
720 #ifdef PANDA_BUILD
721 explicit MacroAssembler(PandaAllocator* allocator,
722 PositionIndependentCodeOption pic = PositionIndependentCode);
723 #else
724 explicit MacroAssembler(
725 PositionIndependentCodeOption pic = PositionIndependentCode);
726 #endif
727 #ifdef PANDA_BUILD
728 MacroAssembler(size_t capacity,
729 PositionIndependentCodeOption pic = PositionIndependentCode) = delete;
730 #else
731 MacroAssembler(size_t capacity,
732 PositionIndependentCodeOption pic = PositionIndependentCode);
733 #endif
734 #ifndef PANDA_BUILD
735 MacroAssembler(byte* buffer,
736 size_t capacity,
737 PositionIndependentCodeOption pic = PositionIndependentCode);
738 #else
739 MacroAssembler(PandaAllocator* allocator, byte* buffer,
740 size_t capacity,
741 PositionIndependentCodeOption pic = PositionIndependentCode);
742 #endif
743 ~MacroAssembler();
744
745 enum FinalizeOption {
746 kFallThrough, // There may be more code to execute after calling Finalize.
747 kUnreachable // Anything generated after calling Finalize is unreachable.
748 };
749
AsAssemblerBase()750 virtual vixl::internal::AssemblerBase* AsAssemblerBase() VIXL_OVERRIDE {
751 return this;
752 }
753
754 // TODO(pools): implement these functions.
EmitPoolHeader()755 virtual void EmitPoolHeader() VIXL_OVERRIDE {}
EmitPoolFooter()756 virtual void EmitPoolFooter() VIXL_OVERRIDE {}
EmitPaddingBytes(int n)757 virtual void EmitPaddingBytes(int n) VIXL_OVERRIDE { USE(n); }
EmitNopBytes(int n)758 virtual void EmitNopBytes(int n) VIXL_OVERRIDE { USE(n); }
759
760 // Start generating code from the beginning of the buffer, discarding any code
761 // and data that has already been emitted into the buffer.
762 //
763 // In order to avoid any accidental transfer of state, Reset ASSERTs that the
764 // constant pool is not blocked.
765 void Reset();
766
767 // Finalize a code buffer of generated instructions. This function must be
768 // called before executing or copying code from the buffer. By default,
769 // anything generated after this should not be reachable (the last instruction
770 // generated is an unconditional branch). If you need to generate more code,
771 // then set `option` to kFallThrough.
772 void FinalizeCode(FinalizeOption option = kUnreachable);
773
774
775 // Constant generation helpers.
776 // These functions return the number of instructions required to move the
777 // immediate into the destination register. Also, if the masm pointer is
778 // non-null, it generates the code to do so.
779 // The two features are implemented using one function to avoid duplication of
780 // the logic.
781 // The function can be used to evaluate the cost of synthesizing an
782 // instruction using 'mov immediate' instructions. A user might prefer loading
783 // a constant using the literal pool instead of using multiple 'mov immediate'
784 // instructions.
785 static int MoveImmediateHelper(MacroAssembler* masm,
786 const Register& rd,
787 uint64_t imm);
788
789
790 // Logical macros.
791 void And(const Register& rd, const Register& rn, const Operand& operand);
792 void Ands(const Register& rd, const Register& rn, const Operand& operand);
793 void Bic(const Register& rd, const Register& rn, const Operand& operand);
794 void Bics(const Register& rd, const Register& rn, const Operand& operand);
795 void Orr(const Register& rd, const Register& rn, const Operand& operand);
796 void Orn(const Register& rd, const Register& rn, const Operand& operand);
797 void Eor(const Register& rd, const Register& rn, const Operand& operand);
798 void Eon(const Register& rd, const Register& rn, const Operand& operand);
799 void Tst(const Register& rn, const Operand& operand);
800 void LogicalMacro(const Register& rd,
801 const Register& rn,
802 const Operand& operand,
803 LogicalOp op);
804
805 // Add and sub macros.
806 void Add(const Register& rd,
807 const Register& rn,
808 const Operand& operand,
809 FlagsUpdate S = LeaveFlags);
810 void Adds(const Register& rd, const Register& rn, const Operand& operand);
811 void Sub(const Register& rd,
812 const Register& rn,
813 const Operand& operand,
814 FlagsUpdate S = LeaveFlags);
815 void Subs(const Register& rd, const Register& rn, const Operand& operand);
816 void Cmn(const Register& rn, const Operand& operand);
817 void Cmp(const Register& rn, const Operand& operand);
818 void Neg(const Register& rd, const Operand& operand);
819 void Negs(const Register& rd, const Operand& operand);
820
821 void AddSubMacro(const Register& rd,
822 const Register& rn,
823 const Operand& operand,
824 FlagsUpdate S,
825 AddSubOp op);
826
827 // Add/sub with carry macros.
828 void Adc(const Register& rd, const Register& rn, const Operand& operand);
829 void Adcs(const Register& rd, const Register& rn, const Operand& operand);
830 void Sbc(const Register& rd, const Register& rn, const Operand& operand);
831 void Sbcs(const Register& rd, const Register& rn, const Operand& operand);
832 void Ngc(const Register& rd, const Operand& operand);
833 void Ngcs(const Register& rd, const Operand& operand);
834 void AddSubWithCarryMacro(const Register& rd,
835 const Register& rn,
836 const Operand& operand,
837 FlagsUpdate S,
838 AddSubWithCarryOp op);
839
840 void Rmif(const Register& xn, unsigned shift, StatusFlags flags);
841 void Setf8(const Register& wn);
842 void Setf16(const Register& wn);
843
844 // Move macros.
845 void Mov(const Register& rd, uint64_t imm);
846 void Mov(const Register& rd,
847 const Operand& operand,
848 DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
Mvn(const Register & rd,uint64_t imm)849 void Mvn(const Register& rd, uint64_t imm) {
850 Mov(rd, (rd.GetSizeInBits() == kXRegSize) ? ~imm : (~imm & kWRegMask));
851 }
852 void Mvn(const Register& rd, const Operand& operand);
853
854 // Try to move an immediate into the destination register in a single
855 // instruction. Returns true for success, and updates the contents of dst.
856 // Returns false, otherwise.
857 bool TryOneInstrMoveImmediate(const Register& dst, uint64_t imm);
858
859 // Move an immediate into register dst, and return an Operand object for
860 // use with a subsequent instruction that accepts a shift. The value moved
861 // into dst is not necessarily equal to imm; it may have had a shifting
862 // operation applied to it that will be subsequently undone by the shift
863 // applied in the Operand.
864 Operand MoveImmediateForShiftedOp(const Register& dst,
865 uint64_t imm,
866 PreShiftImmMode mode);
867
868 void Move(const GenericOperand& dst, const GenericOperand& src);
869
870 // Synthesises the address represented by a MemOperand into a register.
871 void ComputeAddress(const Register& dst, const MemOperand& mem_op);
872
  // Conditional macros. Implemented out of line via ConditionalCompareMacro.
  void Ccmp(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  void Ccmn(const Register& rn,
            const Operand& operand,
            StatusFlags nzcv,
            Condition cond);
  // Common implementation for Ccmp/Ccmn; `op` selects the instruction and
  // `nzcv` is the flag state used when `cond` does not hold.
  void ConditionalCompareMacro(const Register& rn,
                               const Operand& operand,
                               StatusFlags nzcv,
                               Condition cond,
                               ConditionalCompareOp op);
887
888 // On return, the boolean values pointed to will indicate whether `left` and
889 // `right` should be synthesised in a temporary register.
GetCselSynthesisInformation(const Register & rd,const Operand & left,const Operand & right,bool * should_synthesise_left,bool * should_synthesise_right)890 static void GetCselSynthesisInformation(const Register& rd,
891 const Operand& left,
892 const Operand& right,
893 bool* should_synthesise_left,
894 bool* should_synthesise_right) {
895 // Note that the helper does not need to look at the condition.
896 CselHelper(NULL,
897 rd,
898 left,
899 right,
900 eq,
901 should_synthesise_left,
902 should_synthesise_right);
903 }
904
Csel(const Register & rd,const Operand & left,const Operand & right,Condition cond)905 void Csel(const Register& rd,
906 const Operand& left,
907 const Operand& right,
908 Condition cond) {
909 CselHelper(this, rd, left, right, cond);
910 }
911
  // Load/store macros. One member per entry in LS_MACRO_LIST (Ldr, Str, ...).
#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
  void FN(const REGTYPE REG, const MemOperand& addr);
  LS_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Common implementation for the single-register load/store macros above.
  void LoadStoreMacro(const CPURegister& rt,
                      const MemOperand& addr,
                      LoadStoreOp op);

  // Pair load/store macros, one per entry in LSPAIR_MACRO_LIST (Ldp, Stp, ...).
#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
  void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION

  // Common implementation for the pair load/store macros above.
  void LoadStorePairMacro(const CPURegister& rt,
                          const CPURegister& rt2,
                          const MemOperand& addr,
                          LoadStorePairOp op);

  void Prfm(PrefetchOperation op, const MemOperand& addr);
933
  // Push or pop up to 4 registers of the same width to or from the stack,
  // using the current stack pointer as set by SetStackPointer.
  //
  // If an argument register is 'NoReg', all further arguments are also assumed
  // to be 'NoReg', and are thus not pushed or popped.
  //
  // Arguments are ordered such that "Push(a, b);" is functionally equivalent
  // to "Push(a); Push(b);".
  //
  // It is valid to push the same register more than once, and there is no
  // restriction on the order in which registers are specified.
  //
  // It is not valid to pop into the same register more than once in one
  // operation, not even into the zero register.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes on entry and the total size of the specified
  // registers must also be a multiple of 16 bytes.
  //
  // Even if the current stack pointer is not the system stack pointer (sp),
  // Push (and derived methods) will still modify the system stack pointer in
  // order to comply with ABI rules about accessing memory below the system
  // stack pointer.
  //
  // Other than the registers passed into Pop, the stack pointer and (possibly)
  // the system stack pointer, these methods do not modify any other registers.
  void Push(const CPURegister& src0,
            const CPURegister& src1 = NoReg,
            const CPURegister& src2 = NoReg,
            const CPURegister& src3 = NoReg);
  void Pop(const CPURegister& dst0,
           const CPURegister& dst1 = NoReg,
           const CPURegister& dst2 = NoReg,
           const CPURegister& dst3 = NoReg);

  // Alternative forms of Push and Pop, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses (as in the A32 push
  // and pop instructions).
  //
  // (Push|Pop)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
  void PushCPURegList(CPURegList registers);
  void PopCPURegList(CPURegList registers);

  // Wrap a raw RegList + size into a CPURegList and push/pop it.
  void PushSizeRegList(
      RegList registers,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PushCPURegList(CPURegList(type, reg_size, registers));
  }
  void PopSizeRegList(RegList registers,
                      unsigned reg_size,
                      CPURegister::RegisterType type = CPURegister::kRegister) {
    PopCPURegList(CPURegList(type, reg_size, registers));
  }
  // Fixed-size convenience forms of the above.
  void PushXRegList(RegList regs) { PushSizeRegList(regs, kXRegSize); }
  void PopXRegList(RegList regs) { PopSizeRegList(regs, kXRegSize); }
  void PushWRegList(RegList regs) { PushSizeRegList(regs, kWRegSize); }
  void PopWRegList(RegList regs) { PopSizeRegList(regs, kWRegSize); }
  void PushDRegList(RegList regs) {
    PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PopDRegList(RegList regs) {
    PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
  }
  void PushSRegList(RegList regs) {
    PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
  void PopSRegList(RegList regs) {
    PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
  }
1009
  // Push the specified register 'count' times.
  void PushMultipleTimes(int count, Register src);

  // Poke 'src' onto the stack. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Poke(const Register& src, const Operand& offset);

  // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then sp
  // must be aligned to 16 bytes.
  void Peek(const Register& dst, const Operand& offset);

  // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
  // specifies the registers that are to be pushed or popped. Higher-numbered
  // registers are associated with higher memory addresses.
  //
  // (Peek|Poke)SizeRegList allow you to specify the register size as a
  // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
  // supported.
  //
  // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
  // Unlike Push/Pop, these do not move the stack pointer; they load/store
  // relative to the current StackPointer().
  void PeekCPURegList(CPURegList registers, int64_t offset) {
    LoadCPURegList(registers, MemOperand(StackPointer(), offset));
  }
  void PokeCPURegList(CPURegList registers, int64_t offset) {
    StoreCPURegList(registers, MemOperand(StackPointer(), offset));
  }

  void PeekSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PeekCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  void PokeSizeRegList(
      RegList registers,
      int64_t offset,
      unsigned reg_size,
      CPURegister::RegisterType type = CPURegister::kRegister) {
    PokeCPURegList(CPURegList(type, reg_size, registers), offset);
  }
  // Fixed-size convenience forms of the above.
  void PeekXRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kXRegSize);
  }
  void PokeXRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kXRegSize);
  }
  void PeekWRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kWRegSize);
  }
  void PokeWRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kWRegSize);
  }
  void PeekDRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PokeDRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
  }
  void PeekSRegList(RegList regs, int64_t offset) {
    PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }
  void PokeSRegList(RegList regs, int64_t offset) {
    PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
  }


  // Claim or drop stack space without actually accessing memory.
  //
  // If the current stack pointer (as set by SetStackPointer) is sp, then it
  // must be aligned to 16 bytes and the size claimed or dropped must be a
  // multiple of 16 bytes.
  void Claim(const Operand& size);
  void Drop(const Operand& size);
1088
1089 // As above, but for multiples of the SVE vector length.
ClaimVL(int64_t multiplier)1090 void ClaimVL(int64_t multiplier) {
1091 // We never need to worry about sp alignment because the VL is always a
1092 // multiple of 16.
1093 VIXL_STATIC_ASSERT((kZRegMinSizeInBytes % 16) == 0);
1094 VIXL_ASSERT(multiplier >= 0);
1095 Addvl(sp, sp, -multiplier);
1096 }
DropVL(int64_t multiplier)1097 void DropVL(int64_t multiplier) {
1098 VIXL_STATIC_ASSERT((kZRegMinSizeInBytes % 16) == 0);
1099 VIXL_ASSERT(multiplier >= 0);
1100 Addvl(sp, sp, multiplier);
1101 }
1102
  // Preserve the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are pushed before lower-numbered registers, and
  // thus get higher addresses.
  // Floating-point registers are pushed before general-purpose registers, and
  // thus get higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PushCalleeSavedRegisters();

  // Restore the callee-saved registers (as defined by AAPCS64).
  //
  // Higher-numbered registers are popped after lower-numbered registers, and
  // thus come from higher addresses.
  // Floating-point registers are popped after general-purpose registers, and
  // thus come from higher addresses.
  //
  // This method must not be called unless StackPointer() is sp, and it is
  // aligned to 16 bytes.
  void PopCalleeSavedRegisters();

  // Load or store an entire CPURegList to/from memory at `src`/`dst`.
  void LoadCPURegList(CPURegList registers, const MemOperand& src);
  void StoreCPURegList(CPURegList registers, const MemOperand& dst);
1127
  // Remaining instructions are simple pass-through calls to the assembler.
  void Adr(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adr(rd, label);
  }
  void Adrp(const Register& rd, Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    adrp(rd, label);
  }
  // Arithmetic shift right by an immediate shift amount.
  void Asr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    asr(rd, rn, shift);
  }
  // Arithmetic shift right by a register amount; emits the asrv instruction.
  void Asr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    asrv(rd, rn, rm);
  }
1156
1157 // Branch type inversion relies on these relations.
1158 VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
1159 (reg_bit_clear == (reg_bit_set ^ 1)) &&
1160 (always == (never ^ 1)));
1161
InvertBranchType(BranchType type)1162 BranchType InvertBranchType(BranchType type) {
1163 if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
1164 return static_cast<BranchType>(
1165 InvertCondition(static_cast<Condition>(type)));
1166 } else {
1167 return static_cast<BranchType>(type ^ 1);
1168 }
1169 }
1170
  // Generic branch macro. NOTE(review): `reg` and `bit` appear to qualify
  // branch types that test a register or a bit — confirm in the definition.
  void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);

  void B(Label* label);
  void B(Label* label, Condition cond);
  // Argument-order convenience overload of the conditional branch above.
  void B(Condition cond, Label* label) { B(label, cond); }
  // Bitfield move with raw immr/imms encodings.
  void Bfm(const Register& rd,
           const Register& rn,
           unsigned immr,
           unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfm(rd, rn, immr, imms);
  }
  // Bitfield insert: copy `width` low bits of rn into rd at `lsb`.
  void Bfi(const Register& rd,
           const Register& rn,
           unsigned lsb,
           unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfi(rd, rn, lsb, width);
  }
  // Bitfield clear: zero `width` bits of rd starting at `lsb`.
  void Bfc(const Register& rd, unsigned lsb, unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    bfc(rd, lsb, width);
  }
  // Bitfield extract and insert at low end.
  void Bfxil(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    bfxil(rd, rn, lsb, width);
  }
  // Bind `label` at the current location, optionally emitting a BTI landing
  // pad as selected by `id`.
  void Bind(Label* label, BranchTargetIdentifier id = EmitBTI_none);
  // Bind a label to a specified offset from the start of the buffer.
  void BindToOffset(Label* label, ptrdiff_t offset);
  void Bl(Label* label) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bl(label);
  }
  // Branch-and-link to a byte offset, converted here to an instruction
  // offset. NOTE(review): bits below the instruction size are silently
  // discarded by the shift; callers presumably pass aligned offsets.
  void Bl(int64_t offset) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bl(offset >> kInstructionSizeLog2);
  }
  void Blr(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    blr(xn);
  }
  void Br(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    br(xn);
  }
  // Pointer-authentication branch variants using a zero modifier.
  void Braaz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    braaz(xn);
  }
  void Brabz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brabz(xn);
  }
  void Blraaz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blraaz(xn);
  }
  void Blrabz(const Register& xn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blrabz(xn);
  }
  // Authenticated returns (keys A and B).
  void Retaa() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    retaa();
  }
  void Retab() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    retab();
  }
  // Pointer-authentication branch variants taking an explicit modifier in xm.
  void Braa(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    braa(xn, xm);
  }
  void Brab(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brab(xn, xm);
  }
  void Blraa(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blraa(xn, xm);
  }
  void Blrab(const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    blrab(xn, xm);
  }
  // Software breakpoint with an immediate comment field.
  void Brk(int code = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    brk(code);
  }
  void Cbnz(const Register& rt, Label* label);
  void Cbz(const Register& rt, Label* label);
  // Conditional increment: rd = cond ? rn + 1 : rn.
  void Cinc(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinc(rd, rn, cond);
  }
  // Conditional invert: rd = cond ? ~rn : rn.
  void Cinv(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cinv(rd, rn, cond);
  }
1308
  // Instruction-suffix list for the system pointer-authentication forms
  // (pacia z / pacib z / pacia sp / pacib sp, and the matching aut forms).
#define PAUTH_SYSTEM_MODES(V) \
  V(az)                       \
  V(bz)                       \
  V(asp)                      \
  V(bsp)

  // Expands to a Paci<suffix>/Auti<suffix> macro pair per mode above.
#define DEFINE_MACRO_ASM_FUNCS(SUFFIX)      \
  void Paci##SUFFIX() {                     \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    paci##SUFFIX();                         \
  }                                         \
  void Auti##SUFFIX() {                     \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    auti##SUFFIX();                         \
  }

  PAUTH_SYSTEM_MODES(DEFINE_MACRO_ASM_FUNCS)
#undef DEFINE_MACRO_ASM_FUNCS
1329
  // The 1716 pac and aut instructions encourage people to use x16 and x17
  // directly, perhaps without realising that this is forbidden. For example:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     Register temp = temps.AcquireX();  // temp will be x16
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);  // Will override temp!
  //     __ Pacia1716();
  //
  // To work around this issue, you must exclude x16 and x17 from the scratch
  // register list. You may need to replace them with other registers:
  //
  //     UseScratchRegisterScope temps(&masm);
  //     temps.Exclude(x16, x17);
  //     temps.Include(x10, x11);
  //     __ Mov(x17, ptr);
  //     __ Mov(x16, modifier);
  //     __ Pacia1716();
  void Pacia1716() {
    VIXL_ASSERT(allow_macro_instructions_);
    // x16/x17 are implicit operands; they must not be scratch registers.
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
    SingleEmissionCheckScope guard(this);
    pacia1716();
  }
  void Pacib1716() {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
    SingleEmissionCheckScope guard(this);
    pacib1716();
  }
  void Autia1716() {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
    SingleEmissionCheckScope guard(this);
    autia1716();
  }
  void Autib1716() {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x16));
    VIXL_ASSERT(!GetScratchRegisterList()->IncludesAliasOf(x17));
    SingleEmissionCheckScope guard(this);
    autib1716();
  }
  // Strip the pointer-authentication code from the link register.
  void Xpaclri() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    xpaclri();
  }
  // Clear the local monitor's exclusive access record.
  void Clrex() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    clrex();
  }
  // Count leading sign bits.
  void Cls(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cls(rd, rn);
  }
  // Count leading zero bits.
  void Clz(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    clz(rd, rn);
  }
  // Conditional negate: rd = cond ? -rn : rn.
  void Cneg(const Register& rd, const Register& rn, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    cneg(rd, rn, cond);
  }
  // Error synchronization barrier.
  void Esb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    esb();
  }
  // Consumption-of-speculative-data barrier.
  void Csdb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    csdb();
  }
  // Conditional set: rd = cond ? 1 : 0.
  void Cset(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    cset(rd, cond);
  }
  // Conditional set mask: rd = cond ? -1 : 0.
  void Csetm(const Register& rd, Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    csetm(rd, cond);
  }
  // Conditional select increment: rd = cond ? rn : rm + 1.
  // `cond` may not be al or nv (asserted below).
  void Csinc(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinc(rd, rn, rm, cond);
  }
  // Conditional select invert: rd = cond ? rn : ~rm.
  void Csinv(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csinv(rd, rn, rm, cond);
  }
  // Conditional select negate: rd = cond ? rn : -rm.
  void Csneg(const Register& rd,
             const Register& rn,
             const Register& rm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    csneg(rd, rn, rm, cond);
  }
  // Data memory barrier.
  void Dmb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dmb(domain, type);
  }
  // Data synchronization barrier.
  void Dsb(BarrierDomain domain, BarrierType type) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dsb(domain, type);
  }
  // Extract a register from a pair of registers at bit position `lsb`.
  void Extr(const Register& rd,
            const Register& rn,
            const Register& rm,
            unsigned lsb) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    extr(rd, rn, rm, lsb);
  }
  void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fadd(vd, vn, vm);
  }
  // FP conditional compare; `trap` selects the signalling (fccmpe) form.
  void Fccmp(const VRegister& vn,
             const VRegister& vm,
             StatusFlags nzcv,
             Condition cond,
             FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    FPCCompareMacro(vn, vm, nzcv, cond, trap);
  }
  // Signalling form of Fccmp.
  void Fccmpe(const VRegister& vn,
              const VRegister& vm,
              StatusFlags nzcv,
              Condition cond) {
    Fccmp(vn, vm, nzcv, cond, EnableTrap);
  }
  void Fcmp(const VRegister& vn,
            const VRegister& vm,
            FPTrapFlags trap = DisableTrap) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    FPCompareMacro(vn, vm, trap);
  }
  void Fcmp(const VRegister& vn, double value, FPTrapFlags trap = DisableTrap);
  void Fcmpe(const VRegister& vn, double value);
  // Signalling form of Fcmp.
  void Fcmpe(const VRegister& vn, const VRegister& vm) {
    Fcmp(vn, vm, EnableTrap);
  }
  // FP conditional select; `cond` may not be al or nv (asserted below).
  void Fcsel(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             Condition cond) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT((cond != al) && (cond != nv));
    SingleEmissionCheckScope guard(this);
    fcsel(vd, vn, vm, cond);
  }
  // FP precision conversion, and the vector widening (fcvtl/fcvtl2) and
  // narrowing (fcvtn/fcvtn2, fcvtxn/fcvtxn2) variants.
  void Fcvt(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvt(vd, vn);
  }
  void Fcvtl(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl(vd, vn);
  }
  void Fcvtl2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtl2(vd, vn);
  }
  void Fcvtn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn(vd, vn);
  }
  void Fcvtn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtn2(vd, vn);
  }
  void Fcvtxn(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn(vd, vn);
  }
  void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtxn2(vd, vn);
  }
  // FP-to-integer conversions. The suffix letters select rounding mode and
  // signedness (a = ties-away, m = toward minus-inf, n = ties-even,
  // p = toward plus-inf, z = toward zero; s = signed, u = unsigned).
  void Fcvtas(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtas(rd, vn);
  }
  void Fcvtau(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtau(rd, vn);
  }
  void Fcvtms(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtms(rd, vn);
  }
  void Fcvtmu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtmu(rd, vn);
  }
  void Fcvtns(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtns(rd, vn);
  }
  void Fcvtnu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtnu(rd, vn);
  }
  void Fcvtps(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtps(rd, vn);
  }
  void Fcvtpu(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtpu(rd, vn);
  }
  // `fbits` selects a fixed-point result with that many fraction bits.
  void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzs(rd, vn, fbits);
  }
  // JavaScript-semantics double-to-int32 conversion.
  void Fjcvtzs(const Register& rd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fjcvtzs(rd, vn);
  }
  void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    fcvtzu(rd, vn, fbits);
  }
  // FP arithmetic pass-throughs; the *nm variants use the IEEE maxNum/minNum
  // NaN-propagation behaviour of the underlying instructions.
  void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fdiv(vd, vn, vm);
  }
  void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmax(vd, vn, vm);
  }
  void Fmaxnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmaxnm(vd, vn, vm);
  }
  void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmin(vd, vn, vm);
  }
  void Fminnm(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fminnm(vd, vn, vm);
  }
  void Fmov(const VRegister& vd, const VRegister& vn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    // TODO: Use DiscardMoveMode to allow this move to be elided if vd.Is(vn).
    fmov(vd, vn);
  }
  void Fmov(const VRegister& vd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    fmov(vd, rn);
  }
  // Move a general-purpose register into vector lane `index`. Lane 0 of a 1D
  // register is emitted as a general `mov`; all other cases use `fmov`.
  void Fmov(const VRegister& vd, int index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    if (vd.Is1D() && (index == 0)) {
      mov(vd, index, rn);
    } else {
      fmov(vd, index, rn);
    }
  }
  // Move vector lane `index` into a general-purpose register; same
  // mov-vs-fmov selection as above.
  void Fmov(const Register& rd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    if (vn.Is1D() && (index == 0)) {
      mov(rd, vn, index);
    } else {
      fmov(rd, vn, index);
    }
  }
1678
1679 // Provide explicit double and float interfaces for FP immediate moves, rather
1680 // than relying on implicit C++ casts. This allows signalling NaNs to be
1681 // preserved when the immediate matches the format of vd. Most systems convert
1682 // signalling NaNs to quiet NaNs when converting between float and double.
1683 void Fmov(VRegister vd, double imm);
1684 void Fmov(VRegister vd, float imm);
1685 void Fmov(VRegister vd, const Float16 imm);
1686 // Provide a template to allow other types to be converted automatically.
1687 template <typename T>
Fmov(VRegister vd,T imm)1688 void Fmov(VRegister vd, T imm) {
1689 VIXL_ASSERT(allow_macro_instructions_);
1690 Fmov(vd, static_cast<double>(imm));
1691 }
Fmov(Register rd,VRegister vn)1692 void Fmov(Register rd, VRegister vn) {
1693 VIXL_ASSERT(allow_macro_instructions_);
1694 VIXL_ASSERT(!rd.IsZero());
1695 SingleEmissionCheckScope guard(this);
1696 fmov(rd, vn);
1697 }
  // FP multiply: vd = vn * vm.
  void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmul(vd, vn, vm);
  }
  // FP multiply-negate (fnmul).
  void Fnmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmul(vd, vn, vm);
  }
  // FP fused multiply-add: vd = va + (vn * vm).
  void Fmadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmadd(vd, vn, vm, va);
  }
  // FP fused multiply-subtract.
  void Fmsub(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fmsub(vd, vn, vm, va);
  }
  // FP negated fused multiply-add.
  void Fnmadd(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmadd(vd, vn, vm, va);
  }
  // FP negated fused multiply-subtract.
  void Fnmsub(const VRegister& vd,
              const VRegister& vn,
              const VRegister& vm,
              const VRegister& va) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fnmsub(vd, vn, vm, va);
  }
  // FP subtract: vd = vn - vm.
  void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fsub(vd, vn, vm);
  }
  // Emit a hint instruction from the SystemHint enumeration.
  void Hint(SystemHint code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hint(code);
  }
  // Emit a hint instruction with a raw 7-bit immediate.
  void Hint(int imm7) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hint(imm7);
  }
  // Halt with the given immediate code.
  void Hlt(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    hlt(code);
  }
  // Instruction synchronization barrier.
  void Isb() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    isb();
  }
  // Load-acquire register.
  void Ldar(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldar(rt, src);
  }
  // Load-acquire byte.
  void Ldarb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarb(rt, src);
  }
  // Load-acquire halfword.
  void Ldarh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldarh(rt, src);
  }
  // LORegion load variants (ldlar*).
  void Ldlar(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldlar(rt, src);
  }
  void Ldlarb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldlarb(rt, src);
  }
  void Ldlarh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldlarh(rt, src);
  }
  // Load-acquire exclusive pair. rt and rt2 must be distinct.
  void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldaxp(rt, rt2, src);
  }
  // Load-acquire exclusive register / byte / halfword.
  void Ldaxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxr(rt, src);
  }
  void Ldaxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrb(rt, src);
  }
  void Ldaxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldaxrh(rt, src);
  }
1816
  // Generates the single-register compare-and-swap macro wrappers (Cas, Casa,
  // Casl, Casal and their byte/halfword variants). Each wrapper forwards
  // (rs, rt, src) to the matching assembler instruction under a
  // single-emission scope.
  // clang-format off
#define COMPARE_AND_SWAP_SINGLE_MACRO_LIST(V) \
  V(cas,    Cas)    \
  V(casa,   Casa)   \
  V(casl,   Casl)   \
  V(casal,  Casal)  \
  V(casb,   Casb)   \
  V(casab,  Casab)  \
  V(caslb,  Caslb)  \
  V(casalb, Casalb) \
  V(cash,   Cash)   \
  V(casah,  Casah)  \
  V(caslh,  Caslh)  \
  V(casalh, Casalh)
  // clang-format on

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
    VIXL_ASSERT(allow_macro_instructions_);                                  \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(rs, rt, src);                                                        \
  }
  COMPARE_AND_SWAP_SINGLE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC

1842
  // Generates the compare-and-swap-pair macro wrappers (Casp, Caspa, Caspl,
  // Caspal). Each takes two comparison registers and two replacement
  // registers, forwarding straight to the assembler instruction.
  // clang-format off
#define COMPARE_AND_SWAP_PAIR_MACRO_LIST(V) \
  V(casp,   Casp)   \
  V(caspa,  Caspa)  \
  V(caspl,  Caspl)  \
  V(caspal, Caspal)
  // clang-format on

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const Register& rs,             \
            const Register& rs2,            \
            const Register& rt,             \
            const Register& rt2,            \
            const MemOperand& src) {        \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    ASM(rs, rs2, rt, rt2, src);             \
  }
  COMPARE_AND_SWAP_PAIR_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
1863
  // These macros generate all the variations of the atomic memory operations,
  // e.g. ldadd, ldadda, ldaddb, staddl, etc.
  // ATOMIC_MEMORY_SIMPLE_MACRO_LIST enumerates the eight base operations;
  // the *_MACRO_MODES lists expand each base into its ordering/size variants
  // (store forms get release + size suffixes; load forms additionally get
  // the acquire and acquire-release suffixes).

  // clang-format off
#define ATOMIC_MEMORY_SIMPLE_MACRO_LIST(V, DEF, MASM_PRE, ASM_PRE) \
  V(DEF, MASM_PRE##add,  ASM_PRE##add)  \
  V(DEF, MASM_PRE##clr,  ASM_PRE##clr)  \
  V(DEF, MASM_PRE##eor,  ASM_PRE##eor)  \
  V(DEF, MASM_PRE##set,  ASM_PRE##set)  \
  V(DEF, MASM_PRE##smax, ASM_PRE##smax) \
  V(DEF, MASM_PRE##smin, ASM_PRE##smin) \
  V(DEF, MASM_PRE##umax, ASM_PRE##umax) \
  V(DEF, MASM_PRE##umin, ASM_PRE##umin)

#define ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM) \
  V(MASM,     ASM)     \
  V(MASM##l,  ASM##l)  \
  V(MASM##b,  ASM##b)  \
  V(MASM##lb, ASM##lb) \
  V(MASM##h,  ASM##h)  \
  V(MASM##lh, ASM##lh)

#define ATOMIC_MEMORY_LOAD_MACRO_MODES(V, MASM, ASM) \
  ATOMIC_MEMORY_STORE_MACRO_MODES(V, MASM, ASM)      \
  V(MASM##a,   ASM##a)   \
  V(MASM##al,  ASM##al)  \
  V(MASM##ab,  ASM##ab)  \
  V(MASM##alb, ASM##alb) \
  V(MASM##ah,  ASM##ah)  \
  V(MASM##alh, ASM##alh)
  // clang-format on

  // Load forms take a source register, a destination register and a memory
  // operand; store forms omit the destination register.
#define DEFINE_MACRO_LOAD_ASM_FUNC(MASM, ASM)                                \
  void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
    VIXL_ASSERT(allow_macro_instructions_);                                  \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(rs, rt, src);                                                        \
  }
#define DEFINE_MACRO_STORE_ASM_FUNC(MASM, ASM)        \
  void MASM(const Register& rs, const MemOperand& src) { \
    VIXL_ASSERT(allow_macro_instructions_);           \
    SingleEmissionCheckScope guard(this);             \
    ASM(rs, src);                                     \
  }

  ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_LOAD_MACRO_MODES,
                                  DEFINE_MACRO_LOAD_ASM_FUNC,
                                  Ld,
                                  ld)
  ATOMIC_MEMORY_SIMPLE_MACRO_LIST(ATOMIC_MEMORY_STORE_MACRO_MODES,
                                  DEFINE_MACRO_STORE_ASM_FUNC,
                                  St,
                                  st)

  // Swap (swp) uses the load-style signature and the full load mode set.
#define DEFINE_MACRO_SWP_ASM_FUNC(MASM, ASM)                                 \
  void MASM(const Register& rs, const Register& rt, const MemOperand& src) { \
    VIXL_ASSERT(allow_macro_instructions_);                                  \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(rs, rt, src);                                                        \
  }

  ATOMIC_MEMORY_LOAD_MACRO_MODES(DEFINE_MACRO_SWP_ASM_FUNC, Swp, swp)

#undef DEFINE_MACRO_LOAD_ASM_FUNC
#undef DEFINE_MACRO_STORE_ASM_FUNC
#undef DEFINE_MACRO_SWP_ASM_FUNC
1930
  // Load-acquire RCpc byte. The ldaprb form is emitted only for a zero
  // immediate offset; otherwise the unscaled-offset ldapurb form is used.
  void Ldaprb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    VIXL_ASSERT(src.IsImmediateOffset());
    if (src.GetOffset() == 0) {
      ldaprb(rt, src);
    } else {
      ldapurb(rt, src);
    }
  }

  // Load-acquire RCpc signed byte (unscaled offset).
  void Ldapursb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldapursb(rt, src);
  }

  // Load-acquire RCpc halfword; same zero-offset dispatch as Ldaprb.
  void Ldaprh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    VIXL_ASSERT(src.IsImmediateOffset());
    if (src.GetOffset() == 0) {
      ldaprh(rt, src);
    } else {
      ldapurh(rt, src);
    }
  }

  // Load-acquire RCpc signed halfword (unscaled offset).
  void Ldapursh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldapursh(rt, src);
  }

  // Load-acquire RCpc register; same zero-offset dispatch as Ldaprb.
  void Ldapr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    VIXL_ASSERT(src.IsImmediateOffset());
    if (src.GetOffset() == 0) {
      ldapr(rt, src);
    } else {
      ldapur(rt, src);
    }
  }

  // Load-acquire RCpc signed word (unscaled offset).
  void Ldapursw(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldapursw(rt, src);
  }

  // Load pair, non-temporal hint.
  void Ldnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldnp(rt, rt2, src);
  }
  // Provide both double and float interfaces for FP immediate loads, rather
  // than relying on implicit C++ casts. This allows signalling NaNs to be
  // preserved when the immediate matches the format of fd. Most systems convert
  // signalling NaNs to quiet NaNs when converting between float and double.
  //
  // Each overload allocates a pool literal (the pool deletes it on placement,
  // per kDeletedOnPlacementByPool) and emits a PC-relative literal load.
  // Under PANDA_BUILD the literal is allocated via allocator_ instead of new.
  void Ldr(const VRegister& vt, double imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
#ifndef PANDA_BUILD
    // A double immediate is narrowed to float when vt is not a D register.
    if (vt.IsD()) {
      literal = new Literal<double>(imm,
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    } else {
      literal = new Literal<float>(static_cast<float>(imm),
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    }
#else
    if (vt.IsD()) {
      literal = allocator_.New<Literal<double>>(
          imm, &literal_pool_, RawLiteral::kDeletedOnPlacementByPool);
    } else {
      literal = allocator_.New<Literal<float>>(
          static_cast<float>(imm),
          &literal_pool_,
          RawLiteral::kDeletedOnPlacementByPool);
    }
#endif
    ldr(vt, literal);
  }
  void Ldr(const VRegister& vt, float imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
#ifndef PANDA_BUILD
    // A float immediate is widened to double when vt is not an S register.
    if (vt.IsS()) {
      literal = new Literal<float>(imm,
                                   &literal_pool_,
                                   RawLiteral::kDeletedOnPlacementByPool);
    } else {
      literal = new Literal<double>(static_cast<double>(imm),
                                    &literal_pool_,
                                    RawLiteral::kDeletedOnPlacementByPool);
    }
#else
    if (vt.IsS()) {
      literal = allocator_.New<Literal<float>>(
          imm, &literal_pool_, RawLiteral::kDeletedOnPlacementByPool);
    } else {
      literal = allocator_.New<Literal<double>>(
          static_cast<double>(imm),
          &literal_pool_,
          RawLiteral::kDeletedOnPlacementByPool);
    }
#endif
    ldr(vt, literal);
  }
  // Load a 128-bit Q register from a pool literal built from two 64-bit
  // halves.
  void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(vt.IsQ());
    SingleEmissionCheckScope guard(this);
#ifndef PANDA_BUILD
    ldr(vt,
        new Literal<uint64_t>(high64,
                              low64,
                              &literal_pool_,
                              RawLiteral::kDeletedOnPlacementByPool));
#else
    ldr(vt,
        allocator_.New<Literal<uint64_t>>(
            high64,
            low64,
            &literal_pool_,
            RawLiteral::kDeletedOnPlacementByPool));
#endif
  }
  // Load an integer immediate via a pool literal sized to match rt
  // (64-bit literal for X registers, 32-bit for W registers). The pool
  // deletes the literal on placement.
  void Ldr(const Register& rt, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    RawLiteral* literal;
#ifndef PANDA_BUILD
    if (rt.Is64Bits()) {
      literal = new Literal<uint64_t>(imm,
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    } else {
      VIXL_ASSERT(rt.Is32Bits());
      // The value must be representable in 32 bits for a W destination.
      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
      literal = new Literal<uint32_t>(static_cast<uint32_t>(imm),
                                      &literal_pool_,
                                      RawLiteral::kDeletedOnPlacementByPool);
    }
#else
    if (rt.Is64Bits()) {
      literal = allocator_.New<Literal<uint64_t>>(
          imm, &literal_pool_, RawLiteral::kDeletedOnPlacementByPool);
    } else {
      VIXL_ASSERT(rt.Is32Bits());
      VIXL_ASSERT(IsUint32(imm) || IsInt32(imm));
      literal = allocator_.New<Literal<uint32_t>>(
          static_cast<uint32_t>(imm),
          &literal_pool_,
          RawLiteral::kDeletedOnPlacementByPool);
    }
#endif
    ldr(rt, literal);
  }
  // Load-register-signed-word of a 32-bit pool literal.
  void Ldrsw(const Register& rt, uint32_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
#ifndef PANDA_BUILD
    ldrsw(rt,
          new Literal<uint32_t>(imm,
                                &literal_pool_,
                                RawLiteral::kDeletedOnPlacementByPool));
#else
    ldrsw(rt,
          allocator_.New<Literal<uint32_t>>(
              imm, &literal_pool_, RawLiteral::kDeletedOnPlacementByPool));
#endif
  }
  // Literal loads from a caller-provided RawLiteral.
  void Ldr(const CPURegister& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldr(rt, literal);
  }
  void Ldrsw(const Register& rt, RawLiteral* literal) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldrsw(rt, literal);
  }
  // Load exclusive pair. rt and rt2 must be distinct.
  void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    ldxp(rt, rt2, src);
  }
  // Load exclusive register / byte / halfword.
  void Ldxr(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxr(rt, src);
  }
  void Ldxrb(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrb(rt, src);
  }
  void Ldxrh(const Register& rt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldxrh(rt, src);
  }
  // Logical shift left by immediate.
  void Lsl(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsl(rd, rn, shift);
  }
  // Logical shift left by register amount (emits lslv).
  void Lsl(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lslv(rd, rn, rm);
  }
  // Logical shift right by immediate.
  void Lsr(const Register& rd, const Register& rn, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    lsr(rd, rn, shift);
  }
  // Logical shift right by register amount (emits lsrv).
  void Lsr(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    lsrv(rd, rn, rm);
  }
  // Pointer-authenticated loads (keys A and B).
  void Ldraa(const Register& xt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldraa(xt, src);
  }
  void Ldrab(const Register& xt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ldrab(xt, src);
  }
  // Multiply-add: rd = ra + (rn * rm).
  void Madd(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    madd(rd, rn, rm, ra);
  }
  // Multiply-negate: rd = -(rn * rm).
  void Mneg(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mneg(rd, rn, rm);
  }
2204 void Mov(const Register& rd,
2205 const Register& rn,
2206 DiscardMoveMode discard_mode = kDontDiscardForSameWReg) {
2207 VIXL_ASSERT(allow_macro_instructions_);
2208 // Emit a register move only if the registers are distinct, or if they are
2209 // not X registers.
2210 //
2211 // Note that mov(w0, w0) is not a no-op because it clears the top word of
2212 // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
2213 // registers is not required to clear the top word of the X register. In
2214 // this case, the instruction is discarded.
2215 //
2216 // If the sp is an operand, add #0 is emitted, otherwise, orr #0.
2217 if (!rd.Is(rn) ||
2218 (rd.Is32Bits() && (discard_mode == kDontDiscardForSameWReg))) {
2219 SingleEmissionCheckScope guard(this);
2220 mov(rd, rn);
2221 }
2222 }
  // Move-keep: insert a 16-bit immediate at the given shift, preserving the
  // other bits of rd. shift == -1 lets the assembler pick the position.
  void Movk(const Register& rd, uint64_t imm, int shift = -1) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    SingleEmissionCheckScope guard(this);
    movk(rd, imm, shift);
  }
  // Read a system register into rt.
  void Mrs(const Register& rt, SystemRegister sysreg) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    mrs(rt, sysreg);
  }
  // Write rt into a system register.
  void Msr(SystemRegister sysreg, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rt.IsZero());
    SingleEmissionCheckScope guard(this);
    msr(sysreg, rt);
  }
  // Flag-manipulation instructions: cfinv, axflag, xaflag.
  void Cfinv() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cfinv();
  }
  void Axflag() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    axflag();
  }
  void Xaflag() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    xaflag();
  }
  // System instruction with op1/CRn/CRm/op2 encoding; rt defaults to xzr.
  void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    sys(op1, crn, crm, op2, rt);
  }
  // Data cache maintenance operation.
  void Dc(DataCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dc(op, rt);
  }
  // Instruction cache maintenance operation.
  void Ic(InstructionCacheOp op, const Register& rt) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ic(op, rt);
  }
  // Multiply-subtract: rd = ra - (rn * rm).
  void Msub(const Register& rd,
            const Register& rn,
            const Register& rm,
            const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    msub(rd, rn, rm, ra);
  }
  // Multiply: rd = rn * rm.
  void Mul(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    mul(rd, rn, rm);
  }
  // No-operation.
  void Nop() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    nop();
  }
  // Reverse bit order.
  void Rbit(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rbit(rd, rn);
  }
  // Return; defaults to the link register.
  void Ret(const Register& xn = lr) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xn.IsZero());
    SingleEmissionCheckScope guard(this);
    ret(xn);
  }
  // Byte-reverse the whole register.
  void Rev(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev(rd, rn);
  }
  // Byte-reverse each 16-bit halfword.
  void Rev16(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev16(rd, rn);
  }
  // Byte-reverse each 32-bit word.
  void Rev32(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev32(rd, rn);
  }
  // Byte-reverse the 64-bit doubleword.
  void Rev64(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    rev64(rd, rn);
  }
2337
  // Pointer-authentication wrappers. For each prefix (Paci/Pacd/Auti/Autd)
  // this generates the `a`, `za`, `b` and `zb` key/zero-modifier variants,
  // each forwarding to the matching assembler instruction.
#define PAUTH_MASM_VARIATIONS(V) \
  V(Paci, paci)                  \
  V(Pacd, pacd)                  \
  V(Auti, auti)                  \
  V(Autd, autd)

#define DEFINE_MACRO_ASM_FUNCS(MASM_PRE, ASM_PRE)         \
  void MASM_PRE##a(const Register& xd, const Register& xn) { \
    VIXL_ASSERT(allow_macro_instructions_);               \
    SingleEmissionCheckScope guard(this);                 \
    ASM_PRE##a(xd, xn);                                   \
  }                                                       \
  void MASM_PRE##za(const Register& xd) {                 \
    VIXL_ASSERT(allow_macro_instructions_);               \
    SingleEmissionCheckScope guard(this);                 \
    ASM_PRE##za(xd);                                      \
  }                                                       \
  void MASM_PRE##b(const Register& xd, const Register& xn) { \
    VIXL_ASSERT(allow_macro_instructions_);               \
    SingleEmissionCheckScope guard(this);                 \
    ASM_PRE##b(xd, xn);                                   \
  }                                                       \
  void MASM_PRE##zb(const Register& xd) {                 \
    VIXL_ASSERT(allow_macro_instructions_);               \
    SingleEmissionCheckScope guard(this);                 \
    ASM_PRE##zb(xd);                                      \
  }

  PAUTH_MASM_VARIATIONS(DEFINE_MACRO_ASM_FUNCS)
#undef DEFINE_MACRO_ASM_FUNCS

  // Pointer-authentication code for generic data.
  void Pacga(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    pacga(xd, xn, xm);
  }

  // Strip the authentication code from an instruction address.
  void Xpaci(const Register& xd) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    xpaci(xd);
  }

  // Strip the authentication code from a data address.
  void Xpacd(const Register& xd) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    xpacd(xd);
  }
  // Rotate right by immediate.
  void Ror(const Register& rd, const Register& rs, unsigned shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rs.IsZero());
    SingleEmissionCheckScope guard(this);
    ror(rd, rs, shift);
  }
  // Rotate right by register amount (emits rorv).
  void Ror(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    rorv(rd, rn, rm);
  }
  // Signed bitfield insert in zeros.
  void Sbfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfiz(rd, rn, lsb, width);
  }
  // Signed bitfield move with raw immr/imms encoding.
  void Sbfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfm(rd, rn, immr, imms);
  }
  // Signed bitfield extract.
  void Sbfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sbfx(rd, rn, lsb, width);
  }
  // Signed convert to FP, with optional fixed-point fraction bits.
  void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    scvtf(vd, rn, fbits);
  }
  // Signed divide.
  void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    sdiv(rd, rn, rm);
  }
  // Signed multiply-add long.
  void Smaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smaddl(rd, rn, rm, ra);
  }
  // Signed multiply-subtract long.
  void Smsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    smsubl(rd, rn, rm, ra);
  }
  // Signed multiply long.
  void Smull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    smull(rd, rn, rm);
  }
  // Signed multiply high: upper 64 bits of the 128-bit product.
  void Smulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    smulh(xd, xn, xm);
  }
  // Store-release macros. Only an immediate-offset address is accepted: a
  // zero offset selects the base `stlr*` form, while a non-zero offset
  // selects the unscaled-offset `stlur*` form (which requires hardware
  // support for that encoding — assumed to be checked by the assembler).
  void Stlr(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    VIXL_ASSERT(dst.IsImmediateOffset());
    if (dst.GetOffset() == 0) {
      stlr(rt, dst);
    } else {
      stlur(rt, dst);
    }
  }
  // Byte variant of Stlr; same offset-based form selection.
  void Stlrb(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    VIXL_ASSERT(dst.IsImmediateOffset());
    if (dst.GetOffset() == 0) {
      stlrb(rt, dst);
    } else {
      stlurb(rt, dst);
    }
  }
  // Half-word variant of Stlr; same offset-based form selection.
  void Stlrh(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    VIXL_ASSERT(dst.IsImmediateOffset());
    if (dst.GetOffset() == 0) {
      stlrh(rt, dst);
    } else {
      stlurh(rt, dst);
    }
  }
  // Store LORelease (`stllr*`) macros: each emits exactly one instruction
  // with no operand checks beyond the macro-instruction guard.
  void Stllr(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stllr(rt, dst);
  }
  // Byte variant.
  void Stllrb(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stllrb(rt, dst);
  }
  // Half-word variant.
  void Stllrh(const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stllrh(rt, dst);
  }
  // Store-release exclusive macros. The status register `rs` must not alias
  // the base address register or any data register: the architecture leaves
  // such encodings constrained-unpredictable, so the macro rejects them.
  void Stlxp(const Register& rs,
             const Register& rt,
             const Register& rt2,
             const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stlxp(rs, rt, rt2, dst);
  }
  // Single-register store-release exclusive.
  void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxr(rs, rt, dst);
  }
  // Byte variant.
  void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrb(rs, rt, dst);
  }
  // Half-word variant.
  void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stlxrh(rs, rt, dst);
  }
  // Store pair non-temporal (`stnp`): emits exactly one instruction.
  void Stnp(const CPURegister& rt,
            const CPURegister& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    stnp(rt, rt2, dst);
  }
  // Store exclusive macros. As with the store-release exclusive macros
  // above, the status register `rs` must not alias the base address
  // register or any of the data registers.
  void Stxp(const Register& rs,
            const Register& rt,
            const Register& rt2,
            const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    VIXL_ASSERT(!rs.Aliases(rt2));
    SingleEmissionCheckScope guard(this);
    stxp(rs, rt, rt2, dst);
  }
  // Single-register store exclusive.
  void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxr(rs, rt, dst);
  }
  // Byte variant.
  void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrb(rs, rt, dst);
  }
  // Half-word variant.
  void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rs.Aliases(dst.GetBaseRegister()));
    VIXL_ASSERT(!rs.Aliases(rt));
    SingleEmissionCheckScope guard(this);
    stxrh(rs, rt, dst);
  }
  // Supervisor call (`svc`) with the given immediate code.
  void Svc(int code) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    svc(code);
  }
  // Sign-extend byte (`sxtb`).
  void Sxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtb(rd, rn);
  }
  // Sign-extend half-word (`sxth`).
  void Sxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxth(rd, rn);
  }
  // Sign-extend word (`sxtw`).
  void Sxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    sxtw(rd, rn);
  }
  // NEON table lookup (`tbl`) macros: overloads take one to four
  // consecutive table registers plus an index register.
  void Tbl(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vm);
  }
  // Two-register table.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vm);
  }
  // Three-register table.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vm);
  }
  // Four-register table.
  void Tbl(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbl(vd, vn, vn2, vn3, vn4, vm);
  }
  // NEON table extension (`tbx`) macros, mirroring the Tbl overload set.
  void Tbx(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vm);
  }
  // Two-register table.
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vm);
  }
  // Three-register table.
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vm);
  }
  // Four-register table.
  void Tbx(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vn2,
           const VRegister& vn3,
           const VRegister& vn4,
           const VRegister& vm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    tbx(vd, vn, vn2, vn3, vn4, vm);
  }
  // Test-bit-and-branch macros. Declared here and defined out of line —
  // presumably because branching to a Label may need more than a single
  // emitted instruction (TODO(review): confirm against the definition).
  void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
  void Tbz(const Register& rt, unsigned bit_pos, Label* label);
  // Unsigned bitfield insert-in-zero (`ubfiz`).
  void Ubfiz(const Register& rd,
             const Register& rn,
             unsigned lsb,
             unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfiz(rd, rn, lsb, width);
  }
  // Unsigned bitfield move (`ubfm`), the general form underlying ubfiz/ubfx.
  void Ubfm(const Register& rd,
            const Register& rn,
            unsigned immr,
            unsigned imms) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfm(rd, rn, immr, imms);
  }
  // Unsigned bitfield extract (`ubfx`).
  void Ubfx(const Register& rd,
            const Register& rn,
            unsigned lsb,
            unsigned width) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ubfx(rd, rn, lsb, width);
  }
  // Unsigned integer to floating-point convert (`ucvtf`), with an optional
  // fixed-point fraction-bit count (0 means a plain integer conversion).
  void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, rn, fbits);
  }
  // Unsigned divide (`udiv`).
  void Udiv(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    udiv(rd, rn, rm);
  }
  // Unsigned multiply-add long (`umaddl`). Zero-register operands are
  // disallowed for all of the unsigned multiply macros below.
  void Umaddl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umaddl(rd, rn, rm, ra);
  }
  // Unsigned multiply long (`umull`).
  void Umull(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    SingleEmissionCheckScope guard(this);
    umull(rd, rn, rm);
  }
  // Unsigned multiply high (`umulh`); operates on X registers.
  void Umulh(const Register& xd, const Register& xn, const Register& xm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!xd.IsZero());
    VIXL_ASSERT(!xn.IsZero());
    VIXL_ASSERT(!xm.IsZero());
    SingleEmissionCheckScope guard(this);
    umulh(xd, xn, xm);
  }
  // Unsigned multiply-subtract long (`umsubl`).
  void Umsubl(const Register& rd,
              const Register& rn,
              const Register& rm,
              const Register& ra) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    VIXL_ASSERT(!rm.IsZero());
    VIXL_ASSERT(!ra.IsZero());
    SingleEmissionCheckScope guard(this);
    umsubl(rd, rn, rm, ra);
  }
  // Emit a single trapping instruction to mark code that must never execute:
  // HLT with a recognisable opcode when generating for the simulator, UDF
  // otherwise.
  void Unreachable() {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    if (generate_simulator_code_) {
      hlt(kUnreachableOpcode);
    } else {
      // Use the architecturally-defined UDF instruction to abort on hardware,
      // because using HLT and BRK tends to make the process difficult to debug.
      udf(kUnreachableOpcode);
    }
  }
  // Zero-extend byte (`uxtb`).
  void Uxtb(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtb(rd, rn);
  }
  // Zero-extend half-word (`uxth`).
  void Uxth(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxth(rd, rn);
  }
  // Zero-extend word (`uxtw`).
  void Uxtw(const Register& rd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(!rd.IsZero());
    VIXL_ASSERT(!rn.IsZero());
    SingleEmissionCheckScope guard(this);
    uxtw(rd, rn);
  }
2809
  // NEON 3 vector register instructions.
  // Each (asm, Masm) pair below expands — via DEFINE_MACRO_ASM_FUNC — into a
  // macro-assembler method taking (vd, vn, vm) that checks the
  // macro-instruction guard and emits exactly one assembler instruction.
#define NEON_3VREG_MACRO_LIST(V) \
  V(add, Add)                    \
  V(addhn, Addhn)                \
  V(addhn2, Addhn2)              \
  V(addp, Addp)                  \
  V(and_, And)                   \
  V(bic, Bic)                    \
  V(bif, Bif)                    \
  V(bit, Bit)                    \
  V(bsl, Bsl)                    \
  V(cmeq, Cmeq)                  \
  V(cmge, Cmge)                  \
  V(cmgt, Cmgt)                  \
  V(cmhi, Cmhi)                  \
  V(cmhs, Cmhs)                  \
  V(cmtst, Cmtst)                \
  V(eor, Eor)                    \
  V(fabd, Fabd)                  \
  V(facge, Facge)                \
  V(facgt, Facgt)                \
  V(faddp, Faddp)                \
  V(fcmeq, Fcmeq)                \
  V(fcmge, Fcmge)                \
  V(fcmgt, Fcmgt)                \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxp, Fmaxp)                \
  V(fminnmp, Fminnmp)            \
  V(fminp, Fminp)                \
  V(fmla, Fmla)                  \
  V(fmlal, Fmlal)                \
  V(fmlal2, Fmlal2)              \
  V(fmls, Fmls)                  \
  V(fmlsl, Fmlsl)                \
  V(fmlsl2, Fmlsl2)              \
  V(fmulx, Fmulx)                \
  V(frecps, Frecps)              \
  V(frsqrts, Frsqrts)            \
  V(mla, Mla)                    \
  V(mls, Mls)                    \
  V(mul, Mul)                    \
  V(orn, Orn)                    \
  V(orr, Orr)                    \
  V(pmul, Pmul)                  \
  V(pmull, Pmull)                \
  V(pmull2, Pmull2)              \
  V(raddhn, Raddhn)              \
  V(raddhn2, Raddhn2)            \
  V(rsubhn, Rsubhn)              \
  V(rsubhn2, Rsubhn2)            \
  V(saba, Saba)                  \
  V(sabal, Sabal)                \
  V(sabal2, Sabal2)              \
  V(sabd, Sabd)                  \
  V(sabdl, Sabdl)                \
  V(sabdl2, Sabdl2)              \
  V(saddl, Saddl)                \
  V(saddl2, Saddl2)              \
  V(saddw, Saddw)                \
  V(saddw2, Saddw2)              \
  V(shadd, Shadd)                \
  V(shsub, Shsub)                \
  V(smax, Smax)                  \
  V(smaxp, Smaxp)                \
  V(smin, Smin)                  \
  V(sminp, Sminp)                \
  V(smlal, Smlal)                \
  V(smlal2, Smlal2)              \
  V(smlsl, Smlsl)                \
  V(smlsl2, Smlsl2)              \
  V(smull, Smull)                \
  V(smull2, Smull2)              \
  V(sqadd, Sqadd)                \
  V(sqdmlal, Sqdmlal)            \
  V(sqdmlal2, Sqdmlal2)          \
  V(sqdmlsl, Sqdmlsl)            \
  V(sqdmlsl2, Sqdmlsl2)          \
  V(sqdmulh, Sqdmulh)            \
  V(sqdmull, Sqdmull)            \
  V(sqdmull2, Sqdmull2)          \
  V(sqrdmulh, Sqrdmulh)          \
  V(sdot, Sdot)                  \
  V(sqrdmlah, Sqrdmlah)          \
  V(udot, Udot)                  \
  V(sqrdmlsh, Sqrdmlsh)          \
  V(sqrshl, Sqrshl)              \
  V(sqshl, Sqshl)                \
  V(sqsub, Sqsub)                \
  V(srhadd, Srhadd)              \
  V(srshl, Srshl)                \
  V(sshl, Sshl)                  \
  V(ssubl, Ssubl)                \
  V(ssubl2, Ssubl2)              \
  V(ssubw, Ssubw)                \
  V(ssubw2, Ssubw2)              \
  V(sub, Sub)                    \
  V(subhn, Subhn)                \
  V(subhn2, Subhn2)              \
  V(trn1, Trn1)                  \
  V(trn2, Trn2)                  \
  V(uaba, Uaba)                  \
  V(uabal, Uabal)                \
  V(uabal2, Uabal2)              \
  V(uabd, Uabd)                  \
  V(uabdl, Uabdl)                \
  V(uabdl2, Uabdl2)              \
  V(uaddl, Uaddl)                \
  V(uaddl2, Uaddl2)              \
  V(uaddw, Uaddw)                \
  V(uaddw2, Uaddw2)              \
  V(uhadd, Uhadd)                \
  V(uhsub, Uhsub)                \
  V(umax, Umax)                  \
  V(umaxp, Umaxp)                \
  V(umin, Umin)                  \
  V(uminp, Uminp)                \
  V(umlal, Umlal)                \
  V(umlal2, Umlal2)              \
  V(umlsl, Umlsl)                \
  V(umlsl2, Umlsl2)              \
  V(umull, Umull)                \
  V(umull2, Umull2)              \
  V(uqadd, Uqadd)                \
  V(uqrshl, Uqrshl)              \
  V(uqshl, Uqshl)                \
  V(uqsub, Uqsub)                \
  V(urhadd, Urhadd)              \
  V(urshl, Urshl)                \
  V(ushl, Ushl)                  \
  V(usubl, Usubl)                \
  V(usubl2, Usubl2)              \
  V(usubw, Usubw)                \
  V(usubw2, Usubw2)              \
  V(uzp1, Uzp1)                  \
  V(uzp2, Uzp2)                  \
  V(zip1, Zip1)                  \
  V(zip2, Zip2)                  \
  V(smmla, Smmla)                \
  V(ummla, Ummla)                \
  V(usmmla, Usmmla)              \
  V(usdot, Usdot)

  // Stamp out one (vd, vn, vm) wrapper per list entry, then retire the
  // helper macro so it cannot leak into other translation units.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                                     \
  void MASM(const VRegister& vd, const VRegister& vn, const VRegister& vm) { \
    VIXL_ASSERT(allow_macro_instructions_);                                  \
    SingleEmissionCheckScope guard(this);                                    \
    ASM(vd, vn, vm);                                                         \
  }
  NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
2960
  // NEON 2 vector register instructions.
  // Each entry expands into a (vd, vn) wrapper that emits one instruction.
#define NEON_2VREG_MACRO_LIST(V) \
  V(abs, Abs)                    \
  V(addp, Addp)                  \
  V(addv, Addv)                  \
  V(cls, Cls)                    \
  V(clz, Clz)                    \
  V(cnt, Cnt)                    \
  V(fabs, Fabs)                  \
  V(faddp, Faddp)                \
  V(fcvtas, Fcvtas)              \
  V(fcvtau, Fcvtau)              \
  V(fcvtms, Fcvtms)              \
  V(fcvtmu, Fcvtmu)              \
  V(fcvtns, Fcvtns)              \
  V(fcvtnu, Fcvtnu)              \
  V(fcvtps, Fcvtps)              \
  V(fcvtpu, Fcvtpu)              \
  V(fmaxnmp, Fmaxnmp)            \
  V(fmaxnmv, Fmaxnmv)            \
  V(fmaxp, Fmaxp)                \
  V(fmaxv, Fmaxv)                \
  V(fminnmp, Fminnmp)            \
  V(fminnmv, Fminnmv)            \
  V(fminp, Fminp)                \
  V(fminv, Fminv)                \
  V(fneg, Fneg)                  \
  V(frecpe, Frecpe)              \
  V(frecpx, Frecpx)              \
  V(frint32x, Frint32x)          \
  V(frint32z, Frint32z)          \
  V(frint64x, Frint64x)          \
  V(frint64z, Frint64z)          \
  V(frinta, Frinta)              \
  V(frinti, Frinti)              \
  V(frintm, Frintm)              \
  V(frintn, Frintn)              \
  V(frintp, Frintp)              \
  V(frintx, Frintx)              \
  V(frintz, Frintz)              \
  V(frsqrte, Frsqrte)            \
  V(fsqrt, Fsqrt)                \
  V(mov, Mov)                    \
  V(mvn, Mvn)                    \
  V(neg, Neg)                    \
  V(not_, Not)                   \
  V(rbit, Rbit)                  \
  V(rev16, Rev16)                \
  V(rev32, Rev32)                \
  V(rev64, Rev64)                \
  V(sadalp, Sadalp)              \
  V(saddlp, Saddlp)              \
  V(saddlv, Saddlv)              \
  V(smaxv, Smaxv)                \
  V(sminv, Sminv)                \
  V(sqabs, Sqabs)                \
  V(sqneg, Sqneg)                \
  V(sqxtn, Sqxtn)                \
  V(sqxtn2, Sqxtn2)              \
  V(sqxtun, Sqxtun)              \
  V(sqxtun2, Sqxtun2)            \
  V(suqadd, Suqadd)              \
  V(sxtl, Sxtl)                  \
  V(sxtl2, Sxtl2)                \
  V(uadalp, Uadalp)              \
  V(uaddlp, Uaddlp)              \
  V(uaddlv, Uaddlv)              \
  V(umaxv, Umaxv)                \
  V(uminv, Uminv)                \
  V(uqxtn, Uqxtn)                \
  V(uqxtn2, Uqxtn2)              \
  V(urecpe, Urecpe)              \
  V(ursqrte, Ursqrte)            \
  V(usqadd, Usqadd)              \
  V(uxtl, Uxtl)                  \
  V(uxtl2, Uxtl2)                \
  V(xtn, Xtn)                    \
  V(xtn2, Xtn2)

  // Stamp out the (vd, vn) wrappers and retire the helper macro.
#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                \
  void MASM(const VRegister& vd, const VRegister& vn) { \
    VIXL_ASSERT(allow_macro_instructions_);             \
    SingleEmissionCheckScope guard(this);               \
    ASM(vd, vn);                                        \
  }
  NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
3048
  // NEON 2 vector register with immediate instructions.
  // FP compares against an immediate: each entry becomes a
  // (vd, vn, double imm) wrapper emitting one instruction.
#define NEON_2VREG_FPIMM_MACRO_LIST(V) \
  V(fcmeq, Fcmeq)                      \
  V(fcmge, Fcmge)                      \
  V(fcmgt, Fcmgt)                      \
  V(fcmle, Fcmle)                      \
  V(fcmlt, Fcmlt)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                            \
  void MASM(const VRegister& vd, const VRegister& vn, double imm) { \
    VIXL_ASSERT(allow_macro_instructions_);                         \
    SingleEmissionCheckScope guard(this);                           \
    ASM(vd, vn, imm);                                               \
  }
  NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
3065
  // NEON by element instructions.
  // Each entry becomes a (vd, vn, vm, vm_index) wrapper where vm_index
  // selects the lane of vm used by the instruction.
#define NEON_BYELEMENT_MACRO_LIST(V) \
  V(fmul, Fmul)                      \
  V(fmla, Fmla)                      \
  V(fmlal, Fmlal)                    \
  V(fmlal2, Fmlal2)                  \
  V(fmls, Fmls)                      \
  V(fmlsl, Fmlsl)                    \
  V(fmlsl2, Fmlsl2)                  \
  V(fmulx, Fmulx)                    \
  V(mul, Mul)                        \
  V(mla, Mla)                        \
  V(mls, Mls)                        \
  V(sqdmulh, Sqdmulh)                \
  V(sqrdmulh, Sqrdmulh)              \
  V(sdot, Sdot)                      \
  V(sqrdmlah, Sqrdmlah)              \
  V(udot, Udot)                      \
  V(sqrdmlsh, Sqrdmlsh)              \
  V(sqdmull, Sqdmull)                \
  V(sqdmull2, Sqdmull2)              \
  V(sqdmlal, Sqdmlal)                \
  V(sqdmlal2, Sqdmlal2)              \
  V(sqdmlsl, Sqdmlsl)                \
  V(sqdmlsl2, Sqdmlsl2)              \
  V(smull, Smull)                    \
  V(smull2, Smull2)                  \
  V(smlal, Smlal)                    \
  V(smlal2, Smlal2)                  \
  V(smlsl, Smlsl)                    \
  V(smlsl2, Smlsl2)                  \
  V(umull, Umull)                    \
  V(umull2, Umull2)                  \
  V(umlal, Umlal)                    \
  V(umlal2, Umlal2)                  \
  V(umlsl, Umlsl)                    \
  V(umlsl2, Umlsl2)                  \
  V(sudot, Sudot)                    \
  V(usdot, Usdot)


#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)    \
  void MASM(const VRegister& vd,            \
            const VRegister& vn,            \
            const VRegister& vm,            \
            int vm_index) {                 \
    VIXL_ASSERT(allow_macro_instructions_); \
    SingleEmissionCheckScope guard(this);   \
    ASM(vd, vn, vm, vm_index);              \
  }
  NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
3118
  // NEON two-register shift-by-immediate instructions.
  // Each entry becomes a (vd, vn, int shift) wrapper.
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
  V(rshrn, Rshrn)                      \
  V(rshrn2, Rshrn2)                    \
  V(shl, Shl)                          \
  V(shll, Shll)                        \
  V(shll2, Shll2)                      \
  V(shrn, Shrn)                        \
  V(shrn2, Shrn2)                      \
  V(sli, Sli)                          \
  V(sqrshrn, Sqrshrn)                  \
  V(sqrshrn2, Sqrshrn2)                \
  V(sqrshrun, Sqrshrun)                \
  V(sqrshrun2, Sqrshrun2)              \
  V(sqshl, Sqshl)                      \
  V(sqshlu, Sqshlu)                    \
  V(sqshrn, Sqshrn)                    \
  V(sqshrn2, Sqshrn2)                  \
  V(sqshrun, Sqshrun)                  \
  V(sqshrun2, Sqshrun2)                \
  V(sri, Sri)                          \
  V(srshr, Srshr)                      \
  V(srsra, Srsra)                      \
  V(sshr, Sshr)                        \
  V(ssra, Ssra)                        \
  V(uqrshrn, Uqrshrn)                  \
  V(uqrshrn2, Uqrshrn2)                \
  V(uqshl, Uqshl)                      \
  V(uqshrn, Uqshrn)                    \
  V(uqshrn2, Uqshrn2)                  \
  V(urshr, Urshr)                      \
  V(ursra, Ursra)                      \
  V(ushr, Ushr)                        \
  V(usra, Usra)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)                          \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) { \
    VIXL_ASSERT(allow_macro_instructions_);                       \
    SingleEmissionCheckScope guard(this);                         \
    ASM(vd, vn, shift);                                           \
  }
  NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
3161
  // NEON shift-left-long macros. Sshll/Ushll (and the *2 variants) pick
  // between two encodings at macro time: when the shift amount equals the
  // source lane size, the dedicated `shll`/`shll2` form is emitted;
  // otherwise the signed/unsigned `sshll`/`ushll` form is used.
#define NEON_2VREG_SHIFT_LONG_MACRO_LIST(V) \
  V(shll, sshll, Sshll)                     \
  V(shll, ushll, Ushll)                     \
  V(shll2, sshll2, Sshll2)                  \
  V(shll2, ushll2, Ushll2)

#define DEFINE_MACRO_ASM_FUNC(ASM1, ASM2, MASM)                        \
  void MASM(const VRegister& vd, const VRegister& vn, int shift) {     \
    VIXL_ASSERT(allow_macro_instructions_);                            \
    SingleEmissionCheckScope guard(this);                              \
    if (vn.GetLaneSizeInBits() == static_cast<unsigned>(shift)) {      \
      ASM1(vd, vn, shift);                                             \
    } else {                                                           \
      ASM2(vd, vn, shift);                                             \
    }                                                                  \
  }
  NEON_2VREG_SHIFT_LONG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
3180
  // SVE 3 vector register instructions.
  // All listed operations are commutative, which the expansion below relies
  // on: the predicated destructive form requires zd to alias the first
  // source, so when zd aliases zm the operands are swapped instead of
  // needing a temporary; when zd aliases neither source, a
  // MovprfxHelperScope copies zn into zd first.
#define SVE_3VREG_COMMUTATIVE_MACRO_LIST(V) \
  V(add, Add)                               \
  V(and_, And)                              \
  V(bic, Bic)                               \
  V(eor, Eor)                               \
  V(mul, Mul)                               \
  V(orr, Orr)                               \
  V(sabd, Sabd)                             \
  V(shadd, Shadd)                           \
  V(smax, Smax)                             \
  V(smin, Smin)                             \
  V(smulh, Smulh)                           \
  V(sqadd, Sqadd)                           \
  V(srhadd, Srhadd)                         \
  V(uabd, Uabd)                             \
  V(uhadd, Uhadd)                           \
  V(umax, Umax)                             \
  V(umin, Umin)                             \
  V(umulh, Umulh)                           \
  V(uqadd, Uqadd)                           \
  V(urhadd, Urhadd)

#define DEFINE_MACRO_ASM_FUNC(ASM, MASM)           \
  void MASM(const ZRegister& zd,                   \
            const PRegisterM& pg,                  \
            const ZRegister& zn,                   \
            const ZRegister& zm) {                 \
    VIXL_ASSERT(allow_macro_instructions_);        \
    if (zd.Aliases(zn)) {                          \
      SingleEmissionCheckScope guard(this);        \
      ASM(zd, pg, zd, zm);                         \
    } else if (zd.Aliases(zm)) {                   \
      SingleEmissionCheckScope guard(this);        \
      ASM(zd, pg, zd, zn);                         \
    } else {                                       \
      MovprfxHelperScope guard(this, zd, pg, zn);  \
      ASM(zd, pg, zd, zm);                         \
    }                                              \
  }
  SVE_3VREG_COMMUTATIVE_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
#undef DEFINE_MACRO_ASM_FUNC
3223
  // Vector bit-clear with immediate (`bic` vd, #imm8, LSL #left_shift).
  void Bic(const VRegister& vd, const int imm8, const int left_shift = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bic(vd, imm8, left_shift);
  }
  // Vector compare equal against immediate.
  void Cmeq(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmeq(vd, vn, imm);
  }
  // Vector compare greater-than-or-equal against immediate.
  void Cmge(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmge(vd, vn, imm);
  }
  // Vector compare greater-than against immediate.
  void Cmgt(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmgt(vd, vn, imm);
  }
  // Vector compare less-than-or-equal against immediate.
  void Cmle(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmle(vd, vn, imm);
  }
  // Vector compare less-than against immediate.
  void Cmlt(const VRegister& vd, const VRegister& vn, int imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    cmlt(vd, vn, imm);
  }
  // Duplicate the selected lane of vn across vd.
  void Dup(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dup(vd, vn, index);
  }
  // Duplicate a general-purpose register value across vd.
  void Dup(const VRegister& vd, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    dup(vd, rn);
  }
  // Vector extract (`ext`): concatenates vn:vm and extracts bytes starting
  // at `index`.
  void Ext(const VRegister& vd,
           const VRegister& vn,
           const VRegister& vm,
           int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ext(vd, vn, vm, index);
  }
  // FP complex add (`fcadd`) with rotation `rot` (in degrees).
  void Fcadd(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             int rot) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcadd(vd, vn, vm, rot);
  }
  // FP complex multiply-accumulate, by-element form: vm_index selects the
  // complex element of vm.
  void Fcmla(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             int vm_index,
             int rot) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcmla(vd, vn, vm, vm_index, rot);
  }
  // FP complex multiply-accumulate, vector form.
  void Fcmla(const VRegister& vd,
             const VRegister& vn,
             const VRegister& vm,
             int rot) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcmla(vd, vn, vm, rot);
  }
  // Insert vector lane vn[vn_index] into vd[vd_index].
  void Ins(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, vn, vn_index);
  }
  // Insert a general-purpose register value into vd[vd_index].
  void Ins(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ins(vd, vd_index, rn);
  }
  // NEON load single structure (`ld1`) macros: overloads take one to four
  // destination registers, or a single-lane form.
  void Ld1(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, src);
  }
  // Two-register form.
  void Ld1(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, src);
  }
  // Three-register form.
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, src);
  }
  // Four-register form.
  void Ld1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, vt2, vt3, vt4, src);
  }
  // Single-lane form: loads one element into lane `lane` of vt.
  void Ld1(const VRegister& vt, int lane, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1(vt, lane, src);
  }
  // Load one element and replicate it to all lanes of vt.
  void Ld1r(const VRegister& vt, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld1r(vt, src);
  }
  // NEON load two-element structures (`ld2`), de-interleaving into vt/vt2.
  void Ld2(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, src);
  }
  // Single-lane form.
  void Ld2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2(vt, vt2, lane, src);
  }
  // Load one two-element structure and replicate to all lanes.
  void Ld2r(const VRegister& vt, const VRegister& vt2, const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld2r(vt, vt2, src);
  }
  // NEON load three-element structures (`ld3`).
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, src);
  }
  // Single-lane form.
  void Ld3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3(vt, vt2, vt3, lane, src);
  }
  // Load one three-element structure and replicate to all lanes.
  void Ld3r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld3r(vt, vt2, vt3, src);
  }
  // NEON load four-element structures (`ld4`).
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, src);
  }
  // Single-lane form.
  void Ld4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4(vt, vt2, vt3, vt4, lane, src);
  }
  // Load one four-element structure and replicate to all lanes.
  void Ld4r(const VRegister& vt,
            const VRegister& vt2,
            const VRegister& vt3,
            const VRegister& vt4,
            const MemOperand& src) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ld4r(vt, vt2, vt3, vt4, src);
  }
  // Move vector element vn[vn_index] into vector element vd[vd_index].
  void Mov(const VRegister& vd,
           int vd_index,
           const VRegister& vn,
           int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, vn, vn_index);
  }
  // Move the indexed element of vn into vd.
  void Mov(const VRegister& vd, const VRegister& vn, int index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vn, index);
  }
  // Move general-purpose register rn into vector element vd[vd_index].
  void Mov(const VRegister& vd, int vd_index, const Register& rn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(vd, vd_index, rn);
  }
  // Move vector element vn[vn_index] into general-purpose register rd.
  void Mov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mov(rd, vn, vn_index);
  }
  // Vector move-immediate. Defined out of line: these may synthesise
  // immediates that a single `movi` cannot encode.
  void Movi(const VRegister& vd,
            uint64_t imm,
            Shift shift = LSL,
            int shift_amount = 0);
  // 128-bit immediate, given as two 64-bit halves.
  void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
  // Vector move-inverted-immediate (A64 `mvni`).
  void Mvni(const VRegister& vd,
            const int imm8,
            Shift shift = LSL,
            const int shift_amount = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    mvni(vd, imm8, shift, shift_amount);
  }
  // Vector bitwise OR with shifted immediate (A64 `orr` immediate form).
  void Orr(const VRegister& vd, const int imm8, const int left_shift = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    orr(vd, imm8, left_shift);
  }
  // Signed integer to floating-point convert; fbits > 0 selects the
  // fixed-point variant.
  void Scvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    scvtf(vd, vn, fbits);
  }
  // Unsigned integer to floating-point convert; fbits > 0 selects the
  // fixed-point variant.
  void Ucvtf(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ucvtf(vd, vn, fbits);
  }
  // Floating-point convert to signed integer, rounding toward zero;
  // fbits > 0 selects the fixed-point variant.
  void Fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzs(vd, vn, fbits);
  }
  // Floating-point convert to unsigned integer, rounding toward zero;
  // fbits > 0 selects the fixed-point variant.
  void Fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    fcvtzu(vd, vn, fbits);
  }
  // Store a one-register structure (A64 `st1`).
  void St1(const VRegister& vt, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, dst);
  }
  // Store a two-register structure (A64 `st1`, two-register form).
  void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, dst);
  }
  // Store a three-register structure (A64 `st1`, three-register form).
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, dst);
  }
  // Store a four-register structure (A64 `st1`, four-register form).
  void St1(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, vt2, vt3, vt4, dst);
  }
  // Store a single lane of vt (A64 `st1` single-structure form).
  void St1(const VRegister& vt, int lane, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st1(vt, lane, dst);
  }
  // Store multiple 2-element structures from two registers (A64 `st2`).
  void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, dst);
  }
  // Store multiple 3-element structures from three registers (A64 `st3`).
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, dst);
  }
  // Store multiple 4-element structures from four registers (A64 `st4`).
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, dst);
  }
  // Store one 2-element structure from the given lane of two registers
  // (A64 `st2` single-structure form).
  void St2(const VRegister& vt,
           const VRegister& vt2,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st2(vt, vt2, lane, dst);
  }
  // Store one 3-element structure from the given lane of three registers
  // (A64 `st3` single-structure form).
  void St3(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st3(vt, vt2, vt3, lane, dst);
  }
  // Store one 4-element structure from the given lane of four registers
  // (A64 `st4` single-structure form).
  void St4(const VRegister& vt,
           const VRegister& vt2,
           const VRegister& vt3,
           const VRegister& vt4,
           int lane,
           const MemOperand& dst) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    st4(vt, vt2, vt3, vt4, lane, dst);
  }
  // Signed move of vector element vn[vn_index] to rd (sign-extending).
  void Smov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    smov(rd, vn, vn_index);
  }
  // Unsigned move of vector element vn[vn_index] to rd (zero-extending).
  void Umov(const Register& rd, const VRegister& vn, int vn_index) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    umov(rd, vn, vn_index);
  }
  // CRC-32 checksum accumulate, byte operand (A64 `crc32b`).
  void Crc32b(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32b(rd, rn, rm);
  }
  // CRC-32 checksum accumulate, halfword operand (A64 `crc32h`).
  void Crc32h(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32h(rd, rn, rm);
  }
  // CRC-32 checksum accumulate, word operand (A64 `crc32w`).
  void Crc32w(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32w(rd, rn, rm);
  }
  // CRC-32 checksum accumulate, doubleword operand (A64 `crc32x`).
  void Crc32x(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32x(rd, rn, rm);
  }
  // CRC-32C (Castagnoli) checksum accumulate, byte operand (A64 `crc32cb`).
  void Crc32cb(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cb(rd, rn, rm);
  }
  // CRC-32C (Castagnoli) checksum accumulate, halfword operand (A64 `crc32ch`).
  void Crc32ch(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32ch(rd, rn, rm);
  }
  // CRC-32C (Castagnoli) checksum accumulate, word operand (A64 `crc32cw`).
  void Crc32cw(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cw(rd, rn, rm);
  }
  // CRC-32C (Castagnoli) checksum accumulate, doubleword operand
  // (A64 `crc32cx`).
  void Crc32cx(const Register& rd, const Register& rn, const Register& rm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    crc32cx(rd, rn, rm);
  }
3610
3611 // Scalable Vector Extensions.
  // SVE absolute value, predicated with merging (pg is a PRegisterM).
  void Abs(const ZRegister& zd, const PRegisterM& pg, const ZRegister& zn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    abs(zd, pg, zn);
  }
  // SVE unpredicated vector add.
  void Add(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    add(zd, zn, zm);
  }
  // SVE add with immediate. Delegated to AddSubHelper, which handles
  // encoding of the immediate (so no SingleEmissionCheckScope here).
  void Add(const ZRegister& zd, const ZRegister& zn, IntegerOperand imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    AddSubHelper(kAddImmediate, zd, zn, imm);
  }
  // Add a multiple of the SVE predicate-length / vector-length to xn.
  // Defined out of line.
  void Addpl(const Register& xd, const Register& xn, int64_t multiplier);
  void Addvl(const Register& xd, const Register& xn, int64_t multiplier);
  // Compute vector addresses from a base and vector offsets.
  // Note that unlike the core ISA, SVE's `adr` is not PC-relative.
  void Adr(const ZRegister& zd, const SVEMemOperand& addr) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    adr(zd, addr);
  }
  // SVE predicate AND, zeroing predication (pg is a PRegisterZ).
  void And(const PRegisterWithLaneSize& pd,
           const PRegisterZ& pg,
           const PRegisterWithLaneSize& pn,
           const PRegisterWithLaneSize& pm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    and_(pd, pg, pn, pm);
  }
  // SVE bitwise AND with immediate. Only immediates encodable as a logical
  // immediate for the lane size are currently supported.
  void And(const ZRegister& zd, const ZRegister& zn, uint64_t imm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    if (IsImmLogical(imm, zd.GetLaneSizeInBits())) {
      and_(zd, zn, imm);
    } else {
      // TODO: Synthesise the immediate once 'Mov' is implemented.
      VIXL_UNIMPLEMENTED();
    }
  }
  // SVE unpredicated bitwise AND. The operation is lane-size agnostic, so it
  // is emitted with D-sized lanes; operands are only checked for consistency.
  void And(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) {
    VIXL_ASSERT(allow_macro_instructions_);
    VIXL_ASSERT(AreSameLaneSize(zd, zn, zm));
    SingleEmissionCheckScope guard(this);
    and_(zd.VnD(), zn.VnD(), zm.VnD());
  }
  // SVE predicate AND, zeroing predication, setting condition flags.
  void Ands(const PRegisterWithLaneSize& pd,
            const PRegisterZ& pg,
            const PRegisterWithLaneSize& pn,
            const PRegisterWithLaneSize& pm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    ands(pd, pg, pn, pm);
  }
  // SVE bitwise AND horizontal reduction of active lanes into vd.
  void Andv(const VRegister& vd, const PRegister& pg, const ZRegister& zn) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    andv(vd, pg, zn);
  }
  // SVE predicated arithmetic shift right by immediate. The underlying `asr`
  // is destructive, so MovprfxHelperScope first moves zn into zd if needed.
  void Asr(const ZRegister& zd,
           const PRegisterM& pg,
           const ZRegister& zn,
           int shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    MovprfxHelperScope guard(this, zd, pg, zn);
    asr(zd, pg, zd, shift);
  }
  // SVE predicated arithmetic shift right by vector. Defined out of line.
  void Asr(const ZRegister& zd,
           const PRegisterM& pg,
           const ZRegister& zn,
           const ZRegister& zm);
  // SVE unpredicated arithmetic shift right by immediate.
  void Asr(const ZRegister& zd, const ZRegister& zn, int shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    asr(zd, zn, shift);
  }
  // SVE unpredicated arithmetic shift right by (wide) vector.
  void Asr(const ZRegister& zd, const ZRegister& zn, const ZRegister& zm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    asr(zd, zn, zm);
  }
  // SVE arithmetic shift right for divide, predicated. Destructive `asrd`, so
  // MovprfxHelperScope first moves zn into zd if needed.
  void Asrd(const ZRegister& zd,
            const PRegisterM& pg,
            const ZRegister& zn,
            int shift) {
    VIXL_ASSERT(allow_macro_instructions_);
    MovprfxHelperScope guard(this, zd, pg, zn);
    asrd(zd, pg, zd, shift);
  }
  // SVE predicate AND-NOT (bit clear), zeroing predication.
  void Bic(const PRegisterWithLaneSize& pd,
           const PRegisterZ& pg,
           const PRegisterWithLaneSize& pn,
           const PRegisterWithLaneSize& pm) {
    VIXL_ASSERT(allow_macro_instructions_);
    SingleEmissionCheckScope guard(this);
    bic(pd, pg, pn, pm);
  }
Bic(const ZRegister & zd,const ZRegister & zn,const ZRegister & zm)3709