1 // Copyright 2015, VIXL authors
2 // All rights reserved.
3 //
4 // Redistribution and use in source and binary forms, with or without
5 // modification, are permitted provided that the following conditions are met:
6 //
7 // * Redistributions of source code must retain the above copyright notice,
8 // this list of conditions and the following disclaimer.
9 // * Redistributions in binary form must reproduce the above copyright notice,
10 // this list of conditions and the following disclaimer in the documentation
11 // and/or other materials provided with the distribution.
12 // * Neither the name of ARM Limited nor the names of its contributors may be
13 // used to endorse or promote products derived from this software without
14 // specific prior written permission.
15 //
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
20 // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
25 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
27 #include <sys/mman.h>
28
29 #include <cfloat>
30 #include <cmath>
31 #include <cstdio>
32 #include <cstdlib>
33 #include <cstring>
34
35 #include "test-runner.h"
36 #include "test-utils.h"
37 #include "aarch64/test-utils-aarch64.h"
38
39 #include "aarch64/cpu-aarch64.h"
40 #include "aarch64/disasm-aarch64.h"
41 #include "aarch64/macro-assembler-aarch64.h"
42 #include "aarch64/simulator-aarch64.h"
43 #include "test-assembler-aarch64.h"
44
45 namespace vixl {
46 namespace aarch64 {
47
48 TEST(preshift_immediates) {
49 SETUP();
50
51 START();
52 // Test operations involving immediates that could be generated using a
53 // pre-shifted encodable immediate followed by a post-shift applied to
54 // the arithmetic or logical operation.
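  // For example, 0x1f7de is too wide for the 12-bit (optionally shifted)
  // arithmetic immediate field, but 0xfbef (0x1f7de >> 1) fits a single 16-bit
  // move, so the value can be materialised pre-shifted and the shift
  // re-applied on the operation itself (roughly the strategy exercised here).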
55
56 // Save sp.
57 __ Mov(x29, sp);
58
59 // Set the registers to known values.
60 __ Mov(x0, 0x1000);
61 __ Mov(sp, 0x1004);
62
63 // Arithmetic ops.
64 __ Add(x1, x0, 0x1f7de);
65 __ Add(w2, w0, 0xffffff1);
66 __ Adds(x3, x0, 0x18001);
67 __ Adds(w4, w0, 0xffffff1);
68 __ Sub(x5, x0, 0x1f7de);
69 __ Sub(w6, w0, 0xffffff1);
70 __ Subs(x7, x0, 0x18001);
71 __ Subs(w8, w0, 0xffffff1);
72
73 // Logical ops.
74 __ And(x9, x0, 0x1f7de);
75 __ Orr(w10, w0, 0xffffff1);
76 __ Eor(x11, x0, 0x18001);
77
78 // Ops using the stack pointer.
79 __ Add(sp, sp, 0x18001);
80 __ Mov(x12, sp);
81 __ Mov(sp, 0x1004);
82
83 __ Add(sp, sp, 0x1f7de);
84 __ Mov(x13, sp);
85 __ Mov(sp, 0x1004);
86
87 __ Adds(x14, sp, 0x1f7de);
88
89 __ Orr(sp, x0, 0x1f7de);
90 __ Mov(x15, sp);
91
92 // Restore sp.
93 __ Mov(sp, x29);
94 END();
95
96 if (CAN_RUN()) {
97 RUN();
98
99 ASSERT_EQUAL_64(0x1000, x0);
100 ASSERT_EQUAL_64(0x207de, x1);
101 ASSERT_EQUAL_64(0x10000ff1, x2);
102 ASSERT_EQUAL_64(0x19001, x3);
103 ASSERT_EQUAL_64(0x10000ff1, x4);
104 ASSERT_EQUAL_64(0xfffffffffffe1822, x5);
105 ASSERT_EQUAL_64(0xf000100f, x6);
106 ASSERT_EQUAL_64(0xfffffffffffe8fff, x7);
107 ASSERT_EQUAL_64(0xf000100f, x8);
108 ASSERT_EQUAL_64(0x1000, x9);
109 ASSERT_EQUAL_64(0xffffff1, x10);
110 ASSERT_EQUAL_64(0x19001, x11);
111 ASSERT_EQUAL_64(0x19005, x12);
112 ASSERT_EQUAL_64(0x207e2, x13);
113 ASSERT_EQUAL_64(0x207e2, x14);
114 ASSERT_EQUAL_64(0x1f7de, x15);
115 }
116 }
117
118
119 TEST(stack_ops) {
120 SETUP();
121
122 START();
123 // save sp.
124 __ Mov(x29, sp);
125
126 // Set the sp to a known value.
127 __ Mov(sp, 0x1004);
128 __ Mov(x0, sp);
129
130 // Add immediate to the sp, and move the result to a normal register.
131 __ Add(sp, sp, 0x50);
132 __ Mov(x1, sp);
133
134 // Add extended to the sp, and move the result to a normal register.
135 __ Mov(x17, 0xfff);
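  // SXTB takes the low byte of x17 (0xff) and sign-extends it to -1, so the
  // addition below actually subtracts 1 from sp (0x1054 - 1 = 0x1053).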
136 __ Add(sp, sp, Operand(x17, SXTB));
137 __ Mov(x2, sp);
138
139 // Create an sp using a logical instruction, and move to normal register.
140 __ Orr(sp, xzr, 0x1fff);
141 __ Mov(x3, sp);
142
143 // Write wsp using a logical instruction.
144 __ Orr(wsp, wzr, 0xfffffff8);
145 __ Mov(x4, sp);
146
147 // Write sp, and read back wsp.
148 __ Orr(sp, xzr, 0xfffffff8);
149 __ Mov(w5, wsp);
150
151 // Test writing into wsp in cases where the immediate isn't encodable.
152 VIXL_ASSERT(!Assembler::IsImmLogical(0x1234, kWRegSize));
153 __ Orr(wsp, w5, 0x1234);
154 __ Mov(w6, wsp);
155
156 // restore sp.
157 __ Mov(sp, x29);
158 END();
159
160 if (CAN_RUN()) {
161 RUN();
162
163 ASSERT_EQUAL_64(0x1004, x0);
164 ASSERT_EQUAL_64(0x1054, x1);
165 ASSERT_EQUAL_64(0x1053, x2);
166 ASSERT_EQUAL_64(0x1fff, x3);
167 ASSERT_EQUAL_64(0xfffffff8, x4);
168 ASSERT_EQUAL_64(0xfffffff8, x5);
169 ASSERT_EQUAL_64(0xfffffffc, x6);
170 }
171 }
172
173
174 TEST(mvn) {
175 SETUP();
176
177 START();
178 __ Mvn(w0, 0xfff);
179 __ Mvn(x1, 0xfff);
180 __ Mvn(w2, Operand(w0, LSL, 1));
181 __ Mvn(x3, Operand(x1, LSL, 2));
182 __ Mvn(w4, Operand(w0, LSR, 3));
183 __ Mvn(x5, Operand(x1, LSR, 4));
184 __ Mvn(w6, Operand(w0, ASR, 11));
185 __ Mvn(x7, Operand(x1, ASR, 12));
186 __ Mvn(w8, Operand(w0, ROR, 13));
187 __ Mvn(x9, Operand(x1, ROR, 14));
188 __ Mvn(w10, Operand(w2, UXTB));
189 __ Mvn(x11, Operand(x2, SXTB, 1));
190 __ Mvn(w12, Operand(w2, UXTH, 2));
191 __ Mvn(x13, Operand(x2, SXTH, 3));
192 __ Mvn(x14, Operand(w2, UXTW, 4));
193 __ Mvn(x15, Operand(w2, SXTW, 4));
194 END();
195
196 if (CAN_RUN()) {
197 RUN();
198
199 ASSERT_EQUAL_64(0xfffff000, x0);
200 ASSERT_EQUAL_64(0xfffffffffffff000, x1);
201 ASSERT_EQUAL_64(0x00001fff, x2);
202 ASSERT_EQUAL_64(0x0000000000003fff, x3);
203 ASSERT_EQUAL_64(0xe00001ff, x4);
204 ASSERT_EQUAL_64(0xf0000000000000ff, x5);
205 ASSERT_EQUAL_64(0x00000001, x6);
206 ASSERT_EQUAL_64(0x0000000000000000, x7);
207 ASSERT_EQUAL_64(0x7ff80000, x8);
208 ASSERT_EQUAL_64(0x3ffc000000000000, x9);
209 ASSERT_EQUAL_64(0xffffff00, x10);
210 ASSERT_EQUAL_64(0x0000000000000001, x11);
211 ASSERT_EQUAL_64(0xffff8003, x12);
212 ASSERT_EQUAL_64(0xffffffffffff0007, x13);
213 ASSERT_EQUAL_64(0xfffffffffffe000f, x14);
214 ASSERT_EQUAL_64(0xfffffffffffe000f, x15);
215 }
216 }
217
218
219 TEST(mov_imm_w) {
220 SETUP();
221
222 START();
223 __ Mov(w0, 0xffffffff);
224 __ Mov(w1, 0xffff1234);
225 __ Mov(w2, 0x1234ffff);
226 __ Mov(w3, 0x00000000);
227 __ Mov(w4, 0x00001234);
228 __ Mov(w5, 0x12340000);
229 __ Mov(w6, 0x12345678);
230 __ Mov(w7, (int32_t)0x80000000);
231 __ Mov(w8, (int32_t)0xffff0000);
232 __ Mov(w9, kWMinInt);
233 END();
234
235 if (CAN_RUN()) {
236 RUN();
237
238 ASSERT_EQUAL_64(0xffffffff, x0);
239 ASSERT_EQUAL_64(0xffff1234, x1);
240 ASSERT_EQUAL_64(0x1234ffff, x2);
241 ASSERT_EQUAL_64(0x00000000, x3);
242 ASSERT_EQUAL_64(0x00001234, x4);
243 ASSERT_EQUAL_64(0x12340000, x5);
244 ASSERT_EQUAL_64(0x12345678, x6);
245 ASSERT_EQUAL_64(0x80000000, x7);
246 ASSERT_EQUAL_64(0xffff0000, x8);
247 ASSERT_EQUAL_32(kWMinInt, w9);
248 }
249 }
250
251
252 TEST(mov_imm_x) {
253 SETUP();
254
255 START();
256 __ Mov(x0, 0xffffffffffffffff);
257 __ Mov(x1, 0xffffffffffff1234);
258 __ Mov(x2, 0xffffffff12345678);
259 __ Mov(x3, 0xffff1234ffff5678);
260 __ Mov(x4, 0x1234ffffffff5678);
261 __ Mov(x5, 0x1234ffff5678ffff);
262 __ Mov(x6, 0x12345678ffffffff);
263 __ Mov(x7, 0x1234ffffffffffff);
264 __ Mov(x8, 0x123456789abcffff);
265 __ Mov(x9, 0x12345678ffff9abc);
266 __ Mov(x10, 0x1234ffff56789abc);
267 __ Mov(x11, 0xffff123456789abc);
268 __ Mov(x12, 0x0000000000000000);
269 __ Mov(x13, 0x0000000000001234);
270 __ Mov(x14, 0x0000000012345678);
271 __ Mov(x15, 0x0000123400005678);
272 __ Mov(x18, 0x1234000000005678);
273 __ Mov(x19, 0x1234000056780000);
274 __ Mov(x20, 0x1234567800000000);
275 __ Mov(x21, 0x1234000000000000);
276 __ Mov(x22, 0x123456789abc0000);
277 __ Mov(x23, 0x1234567800009abc);
278 __ Mov(x24, 0x1234000056789abc);
279 __ Mov(x25, 0x0000123456789abc);
280 __ Mov(x26, 0x123456789abcdef0);
281 __ Mov(x27, 0xffff000000000001);
282 __ Mov(x28, 0x8000ffff00000000);
283 END();
284
285 if (CAN_RUN()) {
286 RUN();
287
288 ASSERT_EQUAL_64(0xffffffffffff1234, x1);
289 ASSERT_EQUAL_64(0xffffffff12345678, x2);
290 ASSERT_EQUAL_64(0xffff1234ffff5678, x3);
291 ASSERT_EQUAL_64(0x1234ffffffff5678, x4);
292 ASSERT_EQUAL_64(0x1234ffff5678ffff, x5);
293 ASSERT_EQUAL_64(0x12345678ffffffff, x6);
294 ASSERT_EQUAL_64(0x1234ffffffffffff, x7);
295 ASSERT_EQUAL_64(0x123456789abcffff, x8);
296 ASSERT_EQUAL_64(0x12345678ffff9abc, x9);
297 ASSERT_EQUAL_64(0x1234ffff56789abc, x10);
298 ASSERT_EQUAL_64(0xffff123456789abc, x11);
299 ASSERT_EQUAL_64(0x0000000000000000, x12);
300 ASSERT_EQUAL_64(0x0000000000001234, x13);
301 ASSERT_EQUAL_64(0x0000000012345678, x14);
302 ASSERT_EQUAL_64(0x0000123400005678, x15);
303 ASSERT_EQUAL_64(0x1234000000005678, x18);
304 ASSERT_EQUAL_64(0x1234000056780000, x19);
305 ASSERT_EQUAL_64(0x1234567800000000, x20);
306 ASSERT_EQUAL_64(0x1234000000000000, x21);
307 ASSERT_EQUAL_64(0x123456789abc0000, x22);
308 ASSERT_EQUAL_64(0x1234567800009abc, x23);
309 ASSERT_EQUAL_64(0x1234000056789abc, x24);
310 ASSERT_EQUAL_64(0x0000123456789abc, x25);
311 ASSERT_EQUAL_64(0x123456789abcdef0, x26);
312 ASSERT_EQUAL_64(0xffff000000000001, x27);
313 ASSERT_EQUAL_64(0x8000ffff00000000, x28);
314 }
315 }
316
317
318 TEST(mov) {
319 SETUP();
320
321 START();
322 __ Mov(x0, 0xffffffffffffffff);
323 __ Mov(x1, 0xffffffffffffffff);
324 __ Mov(x2, 0xffffffffffffffff);
325 __ Mov(x3, 0xffffffffffffffff);
326
327 __ Mov(x0, 0x0123456789abcdef);
328
329 {
330 ExactAssemblyScope scope(&masm, 3 * kInstructionSize);
331 __ movz(x1, UINT64_C(0xabcd) << 16);
332 __ movk(x2, UINT64_C(0xabcd) << 32);
333 __ movn(x3, UINT64_C(0xabcd) << 48);
334 }
335
336 __ Mov(x4, 0x0123456789abcdef);
337 __ Mov(x5, x4);
338
339 __ Mov(w6, -1);
340
341 // Test that moves back to the same register have the desired effect. This
342 // is a no-op for X registers, and a truncation for W registers.
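  // For example, after Mov(w8, w8) the upper half of x8 is cleared, so x8
  // holds 0x0000000089abcdef rather than 0x0123456789abcdef.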
343 __ Mov(x7, 0x0123456789abcdef);
344 __ Mov(x7, x7);
345 __ Mov(x8, 0x0123456789abcdef);
346 __ Mov(w8, w8);
347 __ Mov(x9, 0x0123456789abcdef);
348 __ Mov(x9, Operand(x9));
349 __ Mov(x10, 0x0123456789abcdef);
350 __ Mov(w10, Operand(w10));
351
352 __ Mov(w11, 0xfff);
353 __ Mov(x12, 0xfff);
354 __ Mov(w13, Operand(w11, LSL, 1));
355 __ Mov(x14, Operand(x12, LSL, 2));
356 __ Mov(w15, Operand(w11, LSR, 3));
357 __ Mov(x18, Operand(x12, LSR, 4));
358 __ Mov(w19, Operand(w11, ASR, 11));
359 __ Mov(x20, Operand(x12, ASR, 12));
360 __ Mov(w21, Operand(w11, ROR, 13));
361 __ Mov(x22, Operand(x12, ROR, 14));
362 __ Mov(w23, Operand(w13, UXTB));
363 __ Mov(x24, Operand(x13, SXTB, 1));
364 __ Mov(w25, Operand(w13, UXTH, 2));
365 __ Mov(x26, Operand(x13, SXTH, 3));
366 __ Mov(x27, Operand(w13, UXTW, 4));
367
368 __ Mov(x28, 0x0123456789abcdef);
369 __ Mov(w28, w28, kDiscardForSameWReg);
370 END();
371
372 if (CAN_RUN()) {
373 RUN();
374
375 ASSERT_EQUAL_64(0x0123456789abcdef, x0);
376 ASSERT_EQUAL_64(0x00000000abcd0000, x1);
377 ASSERT_EQUAL_64(0xffffabcdffffffff, x2);
378 ASSERT_EQUAL_64(0x5432ffffffffffff, x3);
379 ASSERT_EQUAL_64(x4, x5);
380 ASSERT_EQUAL_32(-1, w6);
381 ASSERT_EQUAL_64(0x0123456789abcdef, x7);
382 ASSERT_EQUAL_32(0x89abcdef, w8);
383 ASSERT_EQUAL_64(0x0123456789abcdef, x9);
384 ASSERT_EQUAL_32(0x89abcdef, w10);
385 ASSERT_EQUAL_64(0x00000fff, x11);
386 ASSERT_EQUAL_64(0x0000000000000fff, x12);
387 ASSERT_EQUAL_64(0x00001ffe, x13);
388 ASSERT_EQUAL_64(0x0000000000003ffc, x14);
389 ASSERT_EQUAL_64(0x000001ff, x15);
390 ASSERT_EQUAL_64(0x00000000000000ff, x18);
391 ASSERT_EQUAL_64(0x00000001, x19);
392 ASSERT_EQUAL_64(0x0000000000000000, x20);
393 ASSERT_EQUAL_64(0x7ff80000, x21);
394 ASSERT_EQUAL_64(0x3ffc000000000000, x22);
395 ASSERT_EQUAL_64(0x000000fe, x23);
396 ASSERT_EQUAL_64(0xfffffffffffffffc, x24);
397 ASSERT_EQUAL_64(0x00007ff8, x25);
398 ASSERT_EQUAL_64(0x000000000000fff0, x26);
399 ASSERT_EQUAL_64(0x000000000001ffe0, x27);
400 ASSERT_EQUAL_64(0x0123456789abcdef, x28);
401 }
402 }
403
404
405 TEST(mov_negative) {
406 SETUP();
407
408 START();
409 __ Mov(w11, 0xffffffff);
410 __ Mov(x12, 0xffffffffffffffff);
411
412 __ Mov(w13, Operand(w11, LSL, 1));
413 __ Mov(w14, Operand(w11, LSR, 1));
414 __ Mov(w15, Operand(w11, ASR, 1));
415 __ Mov(w18, Operand(w11, ROR, 1));
416 __ Mov(w19, Operand(w11, UXTB, 1));
417 __ Mov(w20, Operand(w11, SXTB, 1));
418 __ Mov(w21, Operand(w11, UXTH, 1));
419 __ Mov(w22, Operand(w11, SXTH, 1));
420
421 __ Mov(x23, Operand(x12, LSL, 1));
422 __ Mov(x24, Operand(x12, LSR, 1));
423 __ Mov(x25, Operand(x12, ASR, 1));
424 __ Mov(x26, Operand(x12, ROR, 1));
425 __ Mov(x27, Operand(x12, UXTH, 1));
426 __ Mov(x28, Operand(x12, SXTH, 1));
427 __ Mov(x29, Operand(x12, UXTW, 1));
428 __ Mov(x30, Operand(x12, SXTW, 1));
429 END();
430
431 if (CAN_RUN()) {
432 RUN();
433
434 ASSERT_EQUAL_64(0xfffffffe, x13);
435 ASSERT_EQUAL_64(0x7fffffff, x14);
436 ASSERT_EQUAL_64(0xffffffff, x15);
437 ASSERT_EQUAL_64(0xffffffff, x18);
438 ASSERT_EQUAL_64(0x000001fe, x19);
439 ASSERT_EQUAL_64(0xfffffffe, x20);
440 ASSERT_EQUAL_64(0x0001fffe, x21);
441 ASSERT_EQUAL_64(0xfffffffe, x22);
442
443 ASSERT_EQUAL_64(0xfffffffffffffffe, x23);
444 ASSERT_EQUAL_64(0x7fffffffffffffff, x24);
445 ASSERT_EQUAL_64(0xffffffffffffffff, x25);
446 ASSERT_EQUAL_64(0xffffffffffffffff, x26);
447 ASSERT_EQUAL_64(0x000000000001fffe, x27);
448 ASSERT_EQUAL_64(0xfffffffffffffffe, x28);
449 ASSERT_EQUAL_64(0x00000001fffffffe, x29);
450 ASSERT_EQUAL_64(0xfffffffffffffffe, x30);
451 }
452 }
453
454
455 TEST(orr) {
456 SETUP();
457
458 START();
459 __ Mov(x0, 0xf0f0);
460 __ Mov(x1, 0xf00000ff);
461
462 __ Orr(x2, x0, Operand(x1));
463 __ Orr(w3, w0, Operand(w1, LSL, 28));
464 __ Orr(x4, x0, Operand(x1, LSL, 32));
465 __ Orr(x5, x0, Operand(x1, LSR, 4));
466 __ Orr(w6, w0, Operand(w1, ASR, 4));
467 __ Orr(x7, x0, Operand(x1, ASR, 4));
468 __ Orr(w8, w0, Operand(w1, ROR, 12));
469 __ Orr(x9, x0, Operand(x1, ROR, 12));
470 __ Orr(w10, w0, 0xf);
471 __ Orr(x11, x0, 0xf0000000f0000000);
472 END();
473
474 if (CAN_RUN()) {
475 RUN();
476
477 ASSERT_EQUAL_64(0x00000000f000f0ff, x2);
478 ASSERT_EQUAL_64(0xf000f0f0, x3);
479 ASSERT_EQUAL_64(0xf00000ff0000f0f0, x4);
480 ASSERT_EQUAL_64(0x000000000f00f0ff, x5);
481 ASSERT_EQUAL_64(0xff00f0ff, x6);
482 ASSERT_EQUAL_64(0x000000000f00f0ff, x7);
483 ASSERT_EQUAL_64(0x0ffff0f0, x8);
484 ASSERT_EQUAL_64(0x0ff00000000ff0f0, x9);
485 ASSERT_EQUAL_64(0x0000f0ff, x10);
486 ASSERT_EQUAL_64(0xf0000000f000f0f0, x11);
487 }
488 }
489
490
491 TEST(orr_extend) {
492 SETUP();
493
494 START();
495 __ Mov(x0, 1);
496 __ Mov(x1, 0x8000000080008080);
497 __ Orr(w6, w0, Operand(w1, UXTB));
498 __ Orr(x7, x0, Operand(x1, UXTH, 1));
499 __ Orr(w8, w0, Operand(w1, UXTW, 2));
500 __ Orr(x9, x0, Operand(x1, UXTX, 3));
501 __ Orr(w10, w0, Operand(w1, SXTB));
502 __ Orr(x11, x0, Operand(x1, SXTH, 1));
503 __ Orr(x12, x0, Operand(x1, SXTW, 2));
504 __ Orr(x13, x0, Operand(x1, SXTX, 3));
505 END();
506
507 if (CAN_RUN()) {
508 RUN();
509
510 ASSERT_EQUAL_64(0x00000081, x6);
511 ASSERT_EQUAL_64(0x0000000000010101, x7);
512 ASSERT_EQUAL_64(0x00020201, x8);
513 ASSERT_EQUAL_64(0x0000000400040401, x9);
514 ASSERT_EQUAL_64(0xffffff81, x10);
515 ASSERT_EQUAL_64(0xffffffffffff0101, x11);
516 ASSERT_EQUAL_64(0xfffffffe00020201, x12);
517 ASSERT_EQUAL_64(0x0000000400040401, x13);
518 }
519 }
520
521
522 TEST(bitwise_wide_imm) {
523 SETUP();
524
525 START();
526 __ Mov(x0, 0);
527 __ Mov(x1, 0xf0f0f0f0f0f0f0f0);
528
529 __ Orr(x10, x0, 0x1234567890abcdef);
530 __ Orr(w11, w1, 0x90abcdef);
531
532 __ Orr(w12, w0, kWMinInt);
533 __ Eor(w13, w0, kWMinInt);
534 END();
535
536 if (CAN_RUN()) {
537 RUN();
538
539 ASSERT_EQUAL_64(0, x0);
540 ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0, x1);
541 ASSERT_EQUAL_64(0x1234567890abcdef, x10);
542 ASSERT_EQUAL_64(0x00000000f0fbfdff, x11);
543 ASSERT_EQUAL_32(kWMinInt, w12);
544 ASSERT_EQUAL_32(kWMinInt, w13);
545 }
546 }
547
548
549 TEST(orn) {
550 SETUP();
551
552 START();
553 __ Mov(x0, 0xf0f0);
554 __ Mov(x1, 0xf00000ff);
555
556 __ Orn(x2, x0, Operand(x1));
557 __ Orn(w3, w0, Operand(w1, LSL, 4));
558 __ Orn(x4, x0, Operand(x1, LSL, 4));
559 __ Orn(x5, x0, Operand(x1, LSR, 1));
560 __ Orn(w6, w0, Operand(w1, ASR, 1));
561 __ Orn(x7, x0, Operand(x1, ASR, 1));
562 __ Orn(w8, w0, Operand(w1, ROR, 16));
563 __ Orn(x9, x0, Operand(x1, ROR, 16));
564 __ Orn(w10, w0, 0x0000ffff);
565 __ Orn(x11, x0, 0x0000ffff0000ffff);
566 END();
567
568 if (CAN_RUN()) {
569 RUN();
570
571 ASSERT_EQUAL_64(0xffffffff0ffffff0, x2);
572 ASSERT_EQUAL_64(0xfffff0ff, x3);
573 ASSERT_EQUAL_64(0xfffffff0fffff0ff, x4);
574 ASSERT_EQUAL_64(0xffffffff87fffff0, x5);
575 ASSERT_EQUAL_64(0x07fffff0, x6);
576 ASSERT_EQUAL_64(0xffffffff87fffff0, x7);
577 ASSERT_EQUAL_64(0xff00ffff, x8);
578 ASSERT_EQUAL_64(0xff00ffffffffffff, x9);
579 ASSERT_EQUAL_64(0xfffff0f0, x10);
580 ASSERT_EQUAL_64(0xffff0000fffff0f0, x11);
581 }
582 }
583
584
585 TEST(orn_extend) {
586 SETUP();
587
588 START();
589 __ Mov(x0, 1);
590 __ Mov(x1, 0x8000000080008081);
591 __ Orn(w6, w0, Operand(w1, UXTB));
592 __ Orn(x7, x0, Operand(x1, UXTH, 1));
593 __ Orn(w8, w0, Operand(w1, UXTW, 2));
594 __ Orn(x9, x0, Operand(x1, UXTX, 3));
595 __ Orn(w10, w0, Operand(w1, SXTB));
596 __ Orn(x11, x0, Operand(x1, SXTH, 1));
597 __ Orn(x12, x0, Operand(x1, SXTW, 2));
598 __ Orn(x13, x0, Operand(x1, SXTX, 3));
599 END();
600
601 if (CAN_RUN()) {
602 RUN();
603
604 ASSERT_EQUAL_64(0xffffff7f, x6);
605 ASSERT_EQUAL_64(0xfffffffffffefefd, x7);
606 ASSERT_EQUAL_64(0xfffdfdfb, x8);
607 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x9);
608 ASSERT_EQUAL_64(0x0000007f, x10);
609 ASSERT_EQUAL_64(0x000000000000fefd, x11);
610 ASSERT_EQUAL_64(0x00000001fffdfdfb, x12);
611 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x13);
612 }
613 }
614
615
616 TEST(and_) {
617 SETUP();
618
619 START();
620 __ Mov(x0, 0xfff0);
621 __ Mov(x1, 0xf00000ff);
622
623 __ And(x2, x0, Operand(x1));
624 __ And(w3, w0, Operand(w1, LSL, 4));
625 __ And(x4, x0, Operand(x1, LSL, 4));
626 __ And(x5, x0, Operand(x1, LSR, 1));
627 __ And(w6, w0, Operand(w1, ASR, 20));
628 __ And(x7, x0, Operand(x1, ASR, 20));
629 __ And(w8, w0, Operand(w1, ROR, 28));
630 __ And(x9, x0, Operand(x1, ROR, 28));
631 __ And(w10, w0, Operand(0xff00));
632 __ And(x11, x0, Operand(0xff));
633 END();
634
635 if (CAN_RUN()) {
636 RUN();
637
638 ASSERT_EQUAL_64(0x000000f0, x2);
639 ASSERT_EQUAL_64(0x00000ff0, x3);
640 ASSERT_EQUAL_64(0x00000ff0, x4);
641 ASSERT_EQUAL_64(0x00000070, x5);
642 ASSERT_EQUAL_64(0x0000ff00, x6);
643 ASSERT_EQUAL_64(0x00000f00, x7);
644 ASSERT_EQUAL_64(0x00000ff0, x8);
645 ASSERT_EQUAL_64(0x00000000, x9);
646 ASSERT_EQUAL_64(0x0000ff00, x10);
647 ASSERT_EQUAL_64(0x000000f0, x11);
648 }
649 }
650
651
652 TEST(and_extend) {
653 SETUP();
654
655 START();
656 __ Mov(x0, 0xffffffffffffffff);
657 __ Mov(x1, 0x8000000080008081);
658 __ And(w6, w0, Operand(w1, UXTB));
659 __ And(x7, x0, Operand(x1, UXTH, 1));
660 __ And(w8, w0, Operand(w1, UXTW, 2));
661 __ And(x9, x0, Operand(x1, UXTX, 3));
662 __ And(w10, w0, Operand(w1, SXTB));
663 __ And(x11, x0, Operand(x1, SXTH, 1));
664 __ And(x12, x0, Operand(x1, SXTW, 2));
665 __ And(x13, x0, Operand(x1, SXTX, 3));
666 END();
667
668 if (CAN_RUN()) {
669 RUN();
670
671 ASSERT_EQUAL_64(0x00000081, x6);
672 ASSERT_EQUAL_64(0x0000000000010102, x7);
673 ASSERT_EQUAL_64(0x00020204, x8);
674 ASSERT_EQUAL_64(0x0000000400040408, x9);
675 ASSERT_EQUAL_64(0xffffff81, x10);
676 ASSERT_EQUAL_64(0xffffffffffff0102, x11);
677 ASSERT_EQUAL_64(0xfffffffe00020204, x12);
678 ASSERT_EQUAL_64(0x0000000400040408, x13);
679 }
680 }
681
682
683 TEST(ands) {
684 SETUP();
685
686 START();
687 __ Mov(x1, 0xf00000ff);
688 __ Ands(w0, w1, Operand(w1));
689 END();
690
691 if (CAN_RUN()) {
692 RUN();
693
694 ASSERT_EQUAL_NZCV(NFlag);
695 ASSERT_EQUAL_64(0xf00000ff, x0);
696 }
697
698 START();
699 __ Mov(x0, 0xfff0);
700 __ Mov(x1, 0xf00000ff);
701 __ Ands(w0, w0, Operand(w1, LSR, 4));
702 END();
703
704 if (CAN_RUN()) {
705 RUN();
706
707 ASSERT_EQUAL_NZCV(ZFlag);
708 ASSERT_EQUAL_64(0x00000000, x0);
709 }
710
711 START();
712 __ Mov(x0, 0x8000000000000000);
713 __ Mov(x1, 0x00000001);
714 __ Ands(x0, x0, Operand(x1, ROR, 1));
715 END();
716
717 if (CAN_RUN()) {
718 RUN();
719
720 ASSERT_EQUAL_NZCV(NFlag);
721 ASSERT_EQUAL_64(0x8000000000000000, x0);
722 }
723
724 START();
725 __ Mov(x0, 0xfff0);
726 __ Ands(w0, w0, Operand(0xf));
727 END();
728
729 if (CAN_RUN()) {
730 RUN();
731
732 ASSERT_EQUAL_NZCV(ZFlag);
733 ASSERT_EQUAL_64(0x00000000, x0);
734 }
735
736 START();
737 __ Mov(x0, 0xff000000);
738 __ Ands(w0, w0, Operand(0x80000000));
739 END();
740
741 if (CAN_RUN()) {
742 RUN();
743
744 ASSERT_EQUAL_NZCV(NFlag);
745 ASSERT_EQUAL_64(0x80000000, x0);
746 }
747 }
748
749
750 TEST(bic) {
751 SETUP();
752
753 START();
754 __ Mov(x0, 0xfff0);
755 __ Mov(x1, 0xf00000ff);
756
757 __ Bic(x2, x0, Operand(x1));
758 __ Bic(w3, w0, Operand(w1, LSL, 4));
759 __ Bic(x4, x0, Operand(x1, LSL, 4));
760 __ Bic(x5, x0, Operand(x1, LSR, 1));
761 __ Bic(w6, w0, Operand(w1, ASR, 20));
762 __ Bic(x7, x0, Operand(x1, ASR, 20));
763 __ Bic(w8, w0, Operand(w1, ROR, 28));
764 __ Bic(x9, x0, Operand(x1, ROR, 24));
765 __ Bic(x10, x0, Operand(0x1f));
766 __ Bic(x11, x0, Operand(0x100));
767
768 // Test bic into sp when the constant cannot be encoded in the immediate
769 // field.
770 // Use x20 to preserve sp. We check for the result via x21 because the
771 // test infrastructure requires that sp be restored to its original value.
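  // Expected result (checked via x21): 0xffffff & ~0xabcdef = 0x543210.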
772 __ Mov(x20, sp);
773 __ Mov(x0, 0xffffff);
774 __ Bic(sp, x0, Operand(0xabcdef));
775 __ Mov(x21, sp);
776 __ Mov(sp, x20);
777 END();
778
779 if (CAN_RUN()) {
780 RUN();
781
782 ASSERT_EQUAL_64(0x0000ff00, x2);
783 ASSERT_EQUAL_64(0x0000f000, x3);
784 ASSERT_EQUAL_64(0x0000f000, x4);
785 ASSERT_EQUAL_64(0x0000ff80, x5);
786 ASSERT_EQUAL_64(0x000000f0, x6);
787 ASSERT_EQUAL_64(0x0000f0f0, x7);
788 ASSERT_EQUAL_64(0x0000f000, x8);
789 ASSERT_EQUAL_64(0x0000ff00, x9);
790 ASSERT_EQUAL_64(0x0000ffe0, x10);
791 ASSERT_EQUAL_64(0x0000fef0, x11);
792
793 ASSERT_EQUAL_64(0x543210, x21);
794 }
795 }
796
797
798 TEST(bic_extend) {
799 SETUP();
800
801 START();
802 __ Mov(x0, 0xffffffffffffffff);
803 __ Mov(x1, 0x8000000080008081);
804 __ Bic(w6, w0, Operand(w1, UXTB));
805 __ Bic(x7, x0, Operand(x1, UXTH, 1));
806 __ Bic(w8, w0, Operand(w1, UXTW, 2));
807 __ Bic(x9, x0, Operand(x1, UXTX, 3));
808 __ Bic(w10, w0, Operand(w1, SXTB));
809 __ Bic(x11, x0, Operand(x1, SXTH, 1));
810 __ Bic(x12, x0, Operand(x1, SXTW, 2));
811 __ Bic(x13, x0, Operand(x1, SXTX, 3));
812 END();
813
814 if (CAN_RUN()) {
815 RUN();
816
817 ASSERT_EQUAL_64(0xffffff7e, x6);
818 ASSERT_EQUAL_64(0xfffffffffffefefd, x7);
819 ASSERT_EQUAL_64(0xfffdfdfb, x8);
820 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x9);
821 ASSERT_EQUAL_64(0x0000007e, x10);
822 ASSERT_EQUAL_64(0x000000000000fefd, x11);
823 ASSERT_EQUAL_64(0x00000001fffdfdfb, x12);
824 ASSERT_EQUAL_64(0xfffffffbfffbfbf7, x13);
825 }
826 }
827
828
829 TEST(bics) {
830 SETUP();
831
832 START();
833 __ Mov(x1, 0xffff);
834 __ Bics(w0, w1, Operand(w1));
835 END();
836
837 if (CAN_RUN()) {
838 RUN();
839
840 ASSERT_EQUAL_NZCV(ZFlag);
841 ASSERT_EQUAL_64(0x00000000, x0);
842 }
843
844 START();
845 __ Mov(x0, 0xffffffff);
846 __ Bics(w0, w0, Operand(w0, LSR, 1));
847 END();
848
849 if (CAN_RUN()) {
850 RUN();
851
852 ASSERT_EQUAL_NZCV(NFlag);
853 ASSERT_EQUAL_64(0x80000000, x0);
854 }
855
856 START();
857 __ Mov(x0, 0x8000000000000000);
858 __ Mov(x1, 0x00000001);
859 __ Bics(x0, x0, Operand(x1, ROR, 1));
860 END();
861
862 if (CAN_RUN()) {
863 RUN();
864
865 ASSERT_EQUAL_NZCV(ZFlag);
866 ASSERT_EQUAL_64(0x00000000, x0);
867 }
868
869 START();
870 __ Mov(x0, 0xffffffffffffffff);
871 __ Bics(x0, x0, 0x7fffffffffffffff);
872 END();
873
874 if (CAN_RUN()) {
875 RUN();
876
877 ASSERT_EQUAL_NZCV(NFlag);
878 ASSERT_EQUAL_64(0x8000000000000000, x0);
879 }
880
881 START();
882 __ Mov(w0, 0xffff0000);
883 __ Bics(w0, w0, 0xfffffff0);
884 END();
885
886 if (CAN_RUN()) {
887 RUN();
888
889 ASSERT_EQUAL_NZCV(ZFlag);
890 ASSERT_EQUAL_64(0x00000000, x0);
891 }
892 }
893
894
895 TEST(eor) {
896 SETUP();
897
898 START();
899 __ Mov(x0, 0xfff0);
900 __ Mov(x1, 0xf00000ff);
901
902 __ Eor(x2, x0, Operand(x1));
903 __ Eor(w3, w0, Operand(w1, LSL, 4));
904 __ Eor(x4, x0, Operand(x1, LSL, 4));
905 __ Eor(x5, x0, Operand(x1, LSR, 1));
906 __ Eor(w6, w0, Operand(w1, ASR, 20));
907 __ Eor(x7, x0, Operand(x1, ASR, 20));
908 __ Eor(w8, w0, Operand(w1, ROR, 28));
909 __ Eor(x9, x0, Operand(x1, ROR, 28));
910 __ Eor(w10, w0, 0xff00ff00);
911 __ Eor(x11, x0, 0xff00ff00ff00ff00);
912 END();
913
914 if (CAN_RUN()) {
915 RUN();
916
917 ASSERT_EQUAL_64(0x00000000f000ff0f, x2);
918 ASSERT_EQUAL_64(0x0000f000, x3);
919 ASSERT_EQUAL_64(0x0000000f0000f000, x4);
920 ASSERT_EQUAL_64(0x000000007800ff8f, x5);
921 ASSERT_EQUAL_64(0xffff00f0, x6);
922 ASSERT_EQUAL_64(0x000000000000f0f0, x7);
923 ASSERT_EQUAL_64(0x0000f00f, x8);
924 ASSERT_EQUAL_64(0x00000ff00000ffff, x9);
925 ASSERT_EQUAL_64(0xff0000f0, x10);
926 ASSERT_EQUAL_64(0xff00ff00ff0000f0, x11);
927 }
928 }
929
930 TEST(eor_extend) {
931 SETUP();
932
933 START();
934 __ Mov(x0, 0x1111111111111111);
935 __ Mov(x1, 0x8000000080008081);
936 __ Eor(w6, w0, Operand(w1, UXTB));
937 __ Eor(x7, x0, Operand(x1, UXTH, 1));
938 __ Eor(w8, w0, Operand(w1, UXTW, 2));
939 __ Eor(x9, x0, Operand(x1, UXTX, 3));
940 __ Eor(w10, w0, Operand(w1, SXTB));
941 __ Eor(x11, x0, Operand(x1, SXTH, 1));
942 __ Eor(x12, x0, Operand(x1, SXTW, 2));
943 __ Eor(x13, x0, Operand(x1, SXTX, 3));
944 END();
945
946 if (CAN_RUN()) {
947 RUN();
948
949 ASSERT_EQUAL_64(0x11111190, x6);
950 ASSERT_EQUAL_64(0x1111111111101013, x7);
951 ASSERT_EQUAL_64(0x11131315, x8);
952 ASSERT_EQUAL_64(0x1111111511151519, x9);
953 ASSERT_EQUAL_64(0xeeeeee90, x10);
954 ASSERT_EQUAL_64(0xeeeeeeeeeeee1013, x11);
955 ASSERT_EQUAL_64(0xeeeeeeef11131315, x12);
956 ASSERT_EQUAL_64(0x1111111511151519, x13);
957 }
958 }
959
960
961 TEST(eon) {
962 SETUP();
963
964 START();
965 __ Mov(x0, 0xfff0);
966 __ Mov(x1, 0xf00000ff);
967
968 __ Eon(x2, x0, Operand(x1));
969 __ Eon(w3, w0, Operand(w1, LSL, 4));
970 __ Eon(x4, x0, Operand(x1, LSL, 4));
971 __ Eon(x5, x0, Operand(x1, LSR, 1));
972 __ Eon(w6, w0, Operand(w1, ASR, 20));
973 __ Eon(x7, x0, Operand(x1, ASR, 20));
974 __ Eon(w8, w0, Operand(w1, ROR, 28));
975 __ Eon(x9, x0, Operand(x1, ROR, 28));
976 __ Eon(w10, w0, 0x03c003c0);
977 __ Eon(x11, x0, 0x0000100000001000);
978 END();
979
980 if (CAN_RUN()) {
981 RUN();
982
983 ASSERT_EQUAL_64(0xffffffff0fff00f0, x2);
984 ASSERT_EQUAL_64(0xffff0fff, x3);
985 ASSERT_EQUAL_64(0xfffffff0ffff0fff, x4);
986 ASSERT_EQUAL_64(0xffffffff87ff0070, x5);
987 ASSERT_EQUAL_64(0x0000ff0f, x6);
988 ASSERT_EQUAL_64(0xffffffffffff0f0f, x7);
989 ASSERT_EQUAL_64(0xffff0ff0, x8);
990 ASSERT_EQUAL_64(0xfffff00fffff0000, x9);
991 ASSERT_EQUAL_64(0xfc3f03cf, x10);
992 ASSERT_EQUAL_64(0xffffefffffff100f, x11);
993 }
994 }
995
996
997 TEST(eon_extend) {
998 SETUP();
999
1000 START();
1001 __ Mov(x0, 0x1111111111111111);
1002 __ Mov(x1, 0x8000000080008081);
1003 __ Eon(w6, w0, Operand(w1, UXTB));
1004 __ Eon(x7, x0, Operand(x1, UXTH, 1));
1005 __ Eon(w8, w0, Operand(w1, UXTW, 2));
1006 __ Eon(x9, x0, Operand(x1, UXTX, 3));
1007 __ Eon(w10, w0, Operand(w1, SXTB));
1008 __ Eon(x11, x0, Operand(x1, SXTH, 1));
1009 __ Eon(x12, x0, Operand(x1, SXTW, 2));
1010 __ Eon(x13, x0, Operand(x1, SXTX, 3));
1011 END();
1012
1013 if (CAN_RUN()) {
1014 RUN();
1015
1016 ASSERT_EQUAL_64(0xeeeeee6f, x6);
1017 ASSERT_EQUAL_64(0xeeeeeeeeeeefefec, x7);
1018 ASSERT_EQUAL_64(0xeeececea, x8);
1019 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6, x9);
1020 ASSERT_EQUAL_64(0x1111116f, x10);
1021 ASSERT_EQUAL_64(0x111111111111efec, x11);
1022 ASSERT_EQUAL_64(0x11111110eeececea, x12);
1023 ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6, x13);
1024 }
1025 }
1026
1027
1028 TEST(mul) {
1029 SETUP();
1030
1031 START();
1032 __ Mov(x25, 0);
1033 __ Mov(x26, 1);
1034 __ Mov(x18, 0xffffffff);
1035 __ Mov(x19, 0xffffffffffffffff);
1036
1037 __ Mul(w0, w25, w25);
1038 __ Mul(w1, w25, w26);
1039 __ Mul(w2, w26, w18);
1040 __ Mul(w3, w18, w19);
1041 __ Mul(x4, x25, x25);
1042 __ Mul(x5, x26, x18);
1043 __ Mul(x6, x18, x19);
1044 __ Mul(x7, x19, x19);
1045 __ Smull(x8, w26, w18);
1046 __ Smull(x9, w18, w18);
1047 __ Smull(x10, w19, w19);
1048 __ Mneg(w11, w25, w25);
1049 __ Mneg(w12, w25, w26);
1050 __ Mneg(w13, w26, w18);
1051 __ Mneg(w14, w18, w19);
1052 __ Mneg(x20, x25, x25);
1053 __ Mneg(x21, x26, x18);
1054 __ Mneg(x22, x18, x19);
1055 __ Mneg(x23, x19, x19);
1056 END();
1057
1058 if (CAN_RUN()) {
1059 RUN();
1060
1061 ASSERT_EQUAL_64(0, x0);
1062 ASSERT_EQUAL_64(0, x1);
1063 ASSERT_EQUAL_64(0xffffffff, x2);
1064 ASSERT_EQUAL_64(1, x3);
1065 ASSERT_EQUAL_64(0, x4);
1066 ASSERT_EQUAL_64(0xffffffff, x5);
1067 ASSERT_EQUAL_64(0xffffffff00000001, x6);
1068 ASSERT_EQUAL_64(1, x7);
1069 ASSERT_EQUAL_64(0xffffffffffffffff, x8);
1070 ASSERT_EQUAL_64(1, x9);
1071 ASSERT_EQUAL_64(1, x10);
1072 ASSERT_EQUAL_64(0, x11);
1073 ASSERT_EQUAL_64(0, x12);
1074 ASSERT_EQUAL_64(1, x13);
1075 ASSERT_EQUAL_64(0xffffffff, x14);
1076 ASSERT_EQUAL_64(0, x20);
1077 ASSERT_EQUAL_64(0xffffffff00000001, x21);
1078 ASSERT_EQUAL_64(0xffffffff, x22);
1079 ASSERT_EQUAL_64(0xffffffffffffffff, x23);
1080 }
1081 }
1082
1083
1084 static void SmullHelper(int64_t expected, int64_t a, int64_t b) {
1085 SETUP();
1086 START();
1087 __ Mov(w0, a);
1088 __ Mov(w1, b);
1089 __ Smull(x2, w0, w1);
1090 END();
1091 if (CAN_RUN()) {
1092 RUN();
1093 ASSERT_EQUAL_64(expected, x2);
1094 }
1095 }
1096
1097
1098 TEST(smull) {
1099 SmullHelper(0, 0, 0);
1100 SmullHelper(1, 1, 1);
1101 SmullHelper(-1, -1, 1);
1102 SmullHelper(1, -1, -1);
1103 SmullHelper(0xffffffff80000000, 0x80000000, 1);
1104 SmullHelper(0x0000000080000000, 0x00010000, 0x00008000);
1105 }
1106
1107
1108 TEST(madd) {
1109 SETUP();
1110
1111 START();
1112 __ Mov(x16, 0);
1113 __ Mov(x17, 1);
1114 __ Mov(x18, 0xffffffff);
1115 __ Mov(x19, 0xffffffffffffffff);
1116
1117 __ Madd(w0, w16, w16, w16);
1118 __ Madd(w1, w16, w16, w17);
1119 __ Madd(w2, w16, w16, w18);
1120 __ Madd(w3, w16, w16, w19);
1121 __ Madd(w4, w16, w17, w17);
1122 __ Madd(w5, w17, w17, w18);
1123 __ Madd(w6, w17, w17, w19);
1124 __ Madd(w7, w17, w18, w16);
1125 __ Madd(w8, w17, w18, w18);
1126 __ Madd(w9, w18, w18, w17);
1127 __ Madd(w10, w18, w19, w18);
1128 __ Madd(w11, w19, w19, w19);
1129
1130 __ Madd(x12, x16, x16, x16);
1131 __ Madd(x13, x16, x16, x17);
1132 __ Madd(x14, x16, x16, x18);
1133 __ Madd(x15, x16, x16, x19);
1134 __ Madd(x20, x16, x17, x17);
1135 __ Madd(x21, x17, x17, x18);
1136 __ Madd(x22, x17, x17, x19);
1137 __ Madd(x23, x17, x18, x16);
1138 __ Madd(x24, x17, x18, x18);
1139 __ Madd(x25, x18, x18, x17);
1140 __ Madd(x26, x18, x19, x18);
1141 __ Madd(x27, x19, x19, x19);
1142
1143 END();
1144
1145 if (CAN_RUN()) {
1146 RUN();
1147
1148 ASSERT_EQUAL_64(0, x0);
1149 ASSERT_EQUAL_64(1, x1);
1150 ASSERT_EQUAL_64(0xffffffff, x2);
1151 ASSERT_EQUAL_64(0xffffffff, x3);
1152 ASSERT_EQUAL_64(1, x4);
1153 ASSERT_EQUAL_64(0, x5);
1154 ASSERT_EQUAL_64(0, x6);
1155 ASSERT_EQUAL_64(0xffffffff, x7);
1156 ASSERT_EQUAL_64(0xfffffffe, x8);
1157 ASSERT_EQUAL_64(2, x9);
1158 ASSERT_EQUAL_64(0, x10);
1159 ASSERT_EQUAL_64(0, x11);
1160
1161 ASSERT_EQUAL_64(0, x12);
1162 ASSERT_EQUAL_64(1, x13);
1163 ASSERT_EQUAL_64(0x00000000ffffffff, x14);
1164 ASSERT_EQUAL_64(0xffffffffffffffff, x15);
1165 ASSERT_EQUAL_64(1, x20);
1166 ASSERT_EQUAL_64(0x0000000100000000, x21);
1167 ASSERT_EQUAL_64(0, x22);
1168 ASSERT_EQUAL_64(0x00000000ffffffff, x23);
1169 ASSERT_EQUAL_64(0x00000001fffffffe, x24);
1170 ASSERT_EQUAL_64(0xfffffffe00000002, x25);
1171 ASSERT_EQUAL_64(0, x26);
1172 ASSERT_EQUAL_64(0, x27);
1173 }
1174 }
1175
1176
1177 TEST(msub) {
1178 SETUP();
1179
1180 START();
1181 __ Mov(x16, 0);
1182 __ Mov(x17, 1);
1183 __ Mov(x18, 0xffffffff);
1184 __ Mov(x19, 0xffffffffffffffff);
1185
1186 __ Msub(w0, w16, w16, w16);
1187 __ Msub(w1, w16, w16, w17);
1188 __ Msub(w2, w16, w16, w18);
1189 __ Msub(w3, w16, w16, w19);
1190 __ Msub(w4, w16, w17, w17);
1191 __ Msub(w5, w17, w17, w18);
1192 __ Msub(w6, w17, w17, w19);
1193 __ Msub(w7, w17, w18, w16);
1194 __ Msub(w8, w17, w18, w18);
1195 __ Msub(w9, w18, w18, w17);
1196 __ Msub(w10, w18, w19, w18);
1197 __ Msub(w11, w19, w19, w19);
1198
1199 __ Msub(x12, x16, x16, x16);
1200 __ Msub(x13, x16, x16, x17);
1201 __ Msub(x14, x16, x16, x18);
1202 __ Msub(x15, x16, x16, x19);
1203 __ Msub(x20, x16, x17, x17);
1204 __ Msub(x21, x17, x17, x18);
1205 __ Msub(x22, x17, x17, x19);
1206 __ Msub(x23, x17, x18, x16);
1207 __ Msub(x24, x17, x18, x18);
1208 __ Msub(x25, x18, x18, x17);
1209 __ Msub(x26, x18, x19, x18);
1210 __ Msub(x27, x19, x19, x19);
1211
1212 END();
1213
1214 if (CAN_RUN()) {
1215 RUN();
1216
1217 ASSERT_EQUAL_64(0, x0);
1218 ASSERT_EQUAL_64(1, x1);
1219 ASSERT_EQUAL_64(0xffffffff, x2);
1220 ASSERT_EQUAL_64(0xffffffff, x3);
1221 ASSERT_EQUAL_64(1, x4);
1222 ASSERT_EQUAL_64(0xfffffffe, x5);
1223 ASSERT_EQUAL_64(0xfffffffe, x6);
1224 ASSERT_EQUAL_64(1, x7);
1225 ASSERT_EQUAL_64(0, x8);
1226 ASSERT_EQUAL_64(0, x9);
1227 ASSERT_EQUAL_64(0xfffffffe, x10);
1228 ASSERT_EQUAL_64(0xfffffffe, x11);
1229
1230 ASSERT_EQUAL_64(0, x12);
1231 ASSERT_EQUAL_64(1, x13);
1232 ASSERT_EQUAL_64(0x00000000ffffffff, x14);
1233 ASSERT_EQUAL_64(0xffffffffffffffff, x15);
1234 ASSERT_EQUAL_64(1, x20);
1235 ASSERT_EQUAL_64(0x00000000fffffffe, x21);
1236 ASSERT_EQUAL_64(0xfffffffffffffffe, x22);
1237 ASSERT_EQUAL_64(0xffffffff00000001, x23);
1238 ASSERT_EQUAL_64(0, x24);
1239 ASSERT_EQUAL_64(0x0000000200000000, x25);
1240 ASSERT_EQUAL_64(0x00000001fffffffe, x26);
1241 ASSERT_EQUAL_64(0xfffffffffffffffe, x27);
1242 }
1243 }
1244
1245
1246 TEST(smulh) {
1247 SETUP();
1248
1249 START();
1250 __ Mov(x20, 0);
1251 __ Mov(x21, 1);
1252 __ Mov(x22, 0x0000000100000000);
1253 __ Mov(x23, 0x0000000012345678);
1254 __ Mov(x24, 0x0123456789abcdef);
1255 __ Mov(x25, 0x0000000200000000);
1256 __ Mov(x26, 0x8000000000000000);
1257 __ Mov(x27, 0xffffffffffffffff);
1258 __ Mov(x28, 0x5555555555555555);
1259 __ Mov(x29, 0xaaaaaaaaaaaaaaaa);
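  // Smulh returns the upper 64 bits of the signed 128-bit product. For
  // example, (-2^63) * (-2^63) = 2^126, whose top half is 0x4000000000000000.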
1260
1261 __ Smulh(x0, x20, x24);
1262 __ Smulh(x1, x21, x24);
1263 __ Smulh(x2, x22, x23);
1264 __ Smulh(x3, x22, x24);
1265 __ Smulh(x4, x24, x25);
1266 __ Smulh(x5, x23, x27);
1267 __ Smulh(x6, x26, x26);
1268 __ Smulh(x7, x26, x27);
1269 __ Smulh(x8, x27, x27);
1270 __ Smulh(x9, x28, x28);
1271 __ Smulh(x10, x28, x29);
1272 __ Smulh(x11, x29, x29);
1273 END();
1274
1275 if (CAN_RUN()) {
1276 RUN();
1277
1278 ASSERT_EQUAL_64(0, x0);
1279 ASSERT_EQUAL_64(0, x1);
1280 ASSERT_EQUAL_64(0, x2);
1281 ASSERT_EQUAL_64(0x0000000001234567, x3);
1282 ASSERT_EQUAL_64(0x0000000002468acf, x4);
1283 ASSERT_EQUAL_64(0xffffffffffffffff, x5);
1284 ASSERT_EQUAL_64(0x4000000000000000, x6);
1285 ASSERT_EQUAL_64(0, x7);
1286 ASSERT_EQUAL_64(0, x8);
1287 ASSERT_EQUAL_64(0x1c71c71c71c71c71, x9);
1288 ASSERT_EQUAL_64(0xe38e38e38e38e38e, x10);
1289 ASSERT_EQUAL_64(0x1c71c71c71c71c72, x11);
1290 }
1291 }
1292
1293
1294 TEST(umulh) {
1295 SETUP();
1296
1297 START();
1298 __ Mov(x20, 0);
1299 __ Mov(x21, 1);
1300 __ Mov(x22, 0x0000000100000000);
1301 __ Mov(x23, 0x0000000012345678);
1302 __ Mov(x24, 0x0123456789abcdef);
1303 __ Mov(x25, 0x0000000200000000);
1304 __ Mov(x26, 0x8000000000000000);
1305 __ Mov(x27, 0xffffffffffffffff);
1306 __ Mov(x28, 0x5555555555555555);
1307 __ Mov(x29, 0xaaaaaaaaaaaaaaaa);
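  // Umulh returns the upper 64 bits of the unsigned 128-bit product. For
  // example, (2^64 - 1)^2 has 0xfffffffffffffffe as its top half.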
1308
1309 __ Umulh(x0, x20, x24);
1310 __ Umulh(x1, x21, x24);
1311 __ Umulh(x2, x22, x23);
1312 __ Umulh(x3, x22, x24);
1313 __ Umulh(x4, x24, x25);
1314 __ Umulh(x5, x23, x27);
1315 __ Umulh(x6, x26, x26);
1316 __ Umulh(x7, x26, x27);
1317 __ Umulh(x8, x27, x27);
1318 __ Umulh(x9, x28, x28);
1319 __ Umulh(x10, x28, x29);
1320 __ Umulh(x11, x29, x29);
1321 END();
1322
1323 if (CAN_RUN()) {
1324 RUN();
1325
1326 ASSERT_EQUAL_64(0, x0);
1327 ASSERT_EQUAL_64(0, x1);
1328 ASSERT_EQUAL_64(0, x2);
1329 ASSERT_EQUAL_64(0x0000000001234567, x3);
1330 ASSERT_EQUAL_64(0x0000000002468acf, x4);
1331 ASSERT_EQUAL_64(0x0000000012345677, x5);
1332 ASSERT_EQUAL_64(0x4000000000000000, x6);
1333 ASSERT_EQUAL_64(0x7fffffffffffffff, x7);
1334 ASSERT_EQUAL_64(0xfffffffffffffffe, x8);
1335 ASSERT_EQUAL_64(0x1c71c71c71c71c71, x9);
1336 ASSERT_EQUAL_64(0x38e38e38e38e38e3, x10);
1337 ASSERT_EQUAL_64(0x71c71c71c71c71c6, x11);
1338 }
1339 }
1340
1341
1342 TEST(smaddl_umaddl_umull) {
1343 SETUP();
1344
1345 START();
1346 __ Mov(x17, 1);
1347 __ Mov(x18, 0x00000000ffffffff);
1348 __ Mov(x19, 0xffffffffffffffff);
1349 __ Mov(x20, 4);
1350 __ Mov(x21, 0x0000000200000000);
1351
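  // Smaddl/Umaddl compute Xa + (Wn * Wm), widening the 32-bit product
  // (signed or unsigned) to 64 bits; Umull is Umaddl with a zero addend.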
1352 __ Smaddl(x9, w17, w18, x20);
1353 __ Smaddl(x10, w18, w18, x20);
1354 __ Smaddl(x11, w19, w19, x20);
1355 __ Smaddl(x12, w19, w19, x21);
1356 __ Umaddl(x13, w17, w18, x20);
1357 __ Umaddl(x14, w18, w18, x20);
1358 __ Umaddl(x15, w19, w19, x20);
1359 __ Umaddl(x22, w19, w19, x21);
1360 __ Umull(x24, w19, w19);
1361 __ Umull(x25, w17, w18);
1362 END();
1363
1364 if (CAN_RUN()) {
1365 RUN();
1366
1367 ASSERT_EQUAL_64(3, x9);
1368 ASSERT_EQUAL_64(5, x10);
1369 ASSERT_EQUAL_64(5, x11);
1370 ASSERT_EQUAL_64(0x0000000200000001, x12);
1371 ASSERT_EQUAL_64(0x0000000100000003, x13);
1372 ASSERT_EQUAL_64(0xfffffffe00000005, x14);
1373 ASSERT_EQUAL_64(0xfffffffe00000005, x15);
1374 ASSERT_EQUAL_64(1, x22);
1375 ASSERT_EQUAL_64(0xfffffffe00000001, x24);
1376 ASSERT_EQUAL_64(0x00000000ffffffff, x25);
1377 }
1378 }
1379
1380
1381 TEST(smsubl_umsubl) {
1382 SETUP();
1383
1384 START();
1385 __ Mov(x17, 1);
1386 __ Mov(x18, 0x00000000ffffffff);
1387 __ Mov(x19, 0xffffffffffffffff);
1388 __ Mov(x20, 4);
1389 __ Mov(x21, 0x0000000200000000);
1390
1391 __ Smsubl(x9, w17, w18, x20);
1392 __ Smsubl(x10, w18, w18, x20);
1393 __ Smsubl(x11, w19, w19, x20);
1394 __ Smsubl(x12, w19, w19, x21);
1395 __ Umsubl(x13, w17, w18, x20);
1396 __ Umsubl(x14, w18, w18, x20);
1397 __ Umsubl(x15, w19, w19, x20);
1398 __ Umsubl(x22, w19, w19, x21);
1399 END();
1400
1401 if (CAN_RUN()) {
1402 RUN();
1403
1404 ASSERT_EQUAL_64(5, x9);
1405 ASSERT_EQUAL_64(3, x10);
1406 ASSERT_EQUAL_64(3, x11);
1407 ASSERT_EQUAL_64(0x00000001ffffffff, x12);
1408 ASSERT_EQUAL_64(0xffffffff00000005, x13);
1409 ASSERT_EQUAL_64(0x0000000200000003, x14);
1410 ASSERT_EQUAL_64(0x0000000200000003, x15);
1411 ASSERT_EQUAL_64(0x00000003ffffffff, x22);
1412 }
1413 }
1414
1415
1416 TEST(div) {
1417 SETUP();
1418
1419 START();
1420 __ Mov(x16, 1);
1421 __ Mov(x17, 0xffffffff);
1422 __ Mov(x18, 0xffffffffffffffff);
1423 __ Mov(x19, 0x80000000);
1424 __ Mov(x20, 0x8000000000000000);
1425 __ Mov(x21, 2);
1426
1427 __ Udiv(w0, w16, w16);
1428 __ Udiv(w1, w17, w16);
1429 __ Sdiv(w2, w16, w16);
1430 __ Sdiv(w3, w16, w17);
1431 __ Sdiv(w4, w17, w18);
1432
1433 __ Udiv(x5, x16, x16);
1434 __ Udiv(x6, x17, x18);
1435 __ Sdiv(x7, x16, x16);
1436 __ Sdiv(x8, x16, x17);
1437 __ Sdiv(x9, x17, x18);
1438
1439 __ Udiv(w10, w19, w21);
1440 __ Sdiv(w11, w19, w21);
1441 __ Udiv(x12, x19, x21);
1442 __ Sdiv(x13, x19, x21);
1443 __ Udiv(x14, x20, x21);
1444 __ Sdiv(x15, x20, x21);
1445
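  // INT_MIN / -1 overflows; AArch64 defines the result as INT_MIN rather
  // than trapping.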
1446 __ Udiv(w22, w19, w17);
1447 __ Sdiv(w23, w19, w17);
1448 __ Udiv(x24, x20, x18);
1449 __ Sdiv(x25, x20, x18);
1450
1451 __ Udiv(x26, x16, x21);
1452 __ Sdiv(x27, x16, x21);
1453 __ Udiv(x28, x18, x21);
1454 __ Sdiv(x29, x18, x21);
1455
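  // Division by zero does not trap on AArch64; both Udiv and Sdiv return zero.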
1456 __ Mov(x17, 0);
1457 __ Udiv(w18, w16, w17);
1458 __ Sdiv(w19, w16, w17);
1459 __ Udiv(x20, x16, x17);
1460 __ Sdiv(x21, x16, x17);
1461 END();
1462
1463 if (CAN_RUN()) {
1464 RUN();
1465
1466 ASSERT_EQUAL_64(1, x0);
1467 ASSERT_EQUAL_64(0xffffffff, x1);
1468 ASSERT_EQUAL_64(1, x2);
1469 ASSERT_EQUAL_64(0xffffffff, x3);
1470 ASSERT_EQUAL_64(1, x4);
1471 ASSERT_EQUAL_64(1, x5);
1472 ASSERT_EQUAL_64(0, x6);
1473 ASSERT_EQUAL_64(1, x7);
1474 ASSERT_EQUAL_64(0, x8);
1475 ASSERT_EQUAL_64(0xffffffff00000001, x9);
1476 ASSERT_EQUAL_64(0x40000000, x10);
1477 ASSERT_EQUAL_64(0xc0000000, x11);
1478 ASSERT_EQUAL_64(0x0000000040000000, x12);
1479 ASSERT_EQUAL_64(0x0000000040000000, x13);
1480 ASSERT_EQUAL_64(0x4000000000000000, x14);
1481 ASSERT_EQUAL_64(0xc000000000000000, x15);
1482 ASSERT_EQUAL_64(0, x22);
1483 ASSERT_EQUAL_64(0x80000000, x23);
1484 ASSERT_EQUAL_64(0, x24);
1485 ASSERT_EQUAL_64(0x8000000000000000, x25);
1486 ASSERT_EQUAL_64(0, x26);
1487 ASSERT_EQUAL_64(0, x27);
1488 ASSERT_EQUAL_64(0x7fffffffffffffff, x28);
1489 ASSERT_EQUAL_64(0, x29);
1490 ASSERT_EQUAL_64(0, x18);
1491 ASSERT_EQUAL_64(0, x19);
1492 ASSERT_EQUAL_64(0, x20);
1493 ASSERT_EQUAL_64(0, x21);
1494 }
1495 }
1496
1497
1498 TEST(rbit_rev) {
1499 SETUP();
1500
1501 START();
1502 __ Mov(x24, 0xfedcba9876543210);
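  // Rbit reverses the bit order; Rev16 reverses the bytes within each
  // halfword, Rev32 within each word, and Rev/Rev64 within the whole register.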
1503 __ Rbit(w0, w24);
1504 __ Rbit(x1, x24);
1505 __ Rev16(w2, w24);
1506 __ Rev16(x3, x24);
1507 __ Rev(w4, w24);
1508 __ Rev32(x5, x24);
1509 __ Rev64(x6, x24);
1510 __ Rev(x7, x24);
1511 END();
1512
1513 if (CAN_RUN()) {
1514 RUN();
1515
1516 ASSERT_EQUAL_64(0x084c2a6e, x0);
1517 ASSERT_EQUAL_64(0x084c2a6e195d3b7f, x1);
1518 ASSERT_EQUAL_64(0x54761032, x2);
1519 ASSERT_EQUAL_64(0xdcfe98ba54761032, x3);
1520 ASSERT_EQUAL_64(0x10325476, x4);
1521 ASSERT_EQUAL_64(0x98badcfe10325476, x5);
1522 ASSERT_EQUAL_64(0x1032547698badcfe, x6);
1523 ASSERT_EQUAL_64(0x1032547698badcfe, x7);
1524 }
1525 }
1526
1527 typedef void (MacroAssembler::*TestBranchSignature)(const Register& rt,
1528 unsigned bit_pos,
1529 Label* label);
1530
1531 static void TbzRangePoolLimitHelper(TestBranchSignature test_branch) {
1532 const int kTbzRange = 32768;
1533 const int kNumLdrLiteral = kTbzRange / 4;
1534 const int fuzz_range = 2;
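  // Each Ldr(w0, 0x12345678) below places a four-byte literal in the pool, so
  // kNumLdrLiteral (32768 / 4 = 8192) entries correspond to the full TBZ
  // range; fuzz_range perturbs the count around that boundary.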
1535 for (int n = kNumLdrLiteral - fuzz_range; n <= kNumLdrLiteral + fuzz_range;
1536 ++n) {
1537 for (int margin = -32; margin < 32; margin += 4) {
1538 SETUP();
1539
1540 START();
1541
1542 // Emit 32KB of literals (equal to the range of TBZ).
1543 for (int i = 0; i < n; ++i) {
1544 __ Ldr(w0, 0x12345678);
1545 }
1546
1547 const int kLiteralMargin = 128 * KBytes;
1548
1549 // Emit enough NOPs to be just about to emit the literal pool.
1550 ptrdiff_t end =
1551 masm.GetCursorOffset() + (kLiteralMargin - n * 4 + margin);
1552 while (masm.GetCursorOffset() < end) {
1553 __ Nop();
1554 }
1555
1556 // Add a TBZ instruction.
1557 Label label;
1558
1559 (masm.*test_branch)(x0, 2, &label);
1560
1561 // Add enough NOPs to surpass its range, to make sure we can encode the
1562 // veneer.
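      // (A veneer is an out-of-line unconditional branch that the short-range
      // TBZ is retargeted to once the label drifts beyond its +/-32KB reach.)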
1563 end = masm.GetCursorOffset() + (kTbzRange - 4);
1564 {
1565 ExactAssemblyScope scope(&masm,
1566 kTbzRange,
1567 ExactAssemblyScope::kMaximumSize);
1568 while (masm.GetCursorOffset() < end) __ nop();
1569 }
1570
1571 // Finally, bind the label.
1572 __ Bind(&label);
1573
1574 END();
1575
1576 if (CAN_RUN()) {
1577 RUN();
1578 }
1579 }
1580 }
1581 }
1582
1583 TEST(test_branch_limits_literal_pool_size_tbz) {
1584 TbzRangePoolLimitHelper(&MacroAssembler::Tbz);
1585 }
1586
1587 TEST(test_branch_limits_literal_pool_size_tbnz) {
1588 TbzRangePoolLimitHelper(&MacroAssembler::Tbnz);
1589 }
1590
1591 TEST(clz_cls) {
1592 SETUP();
1593
1594 START();
1595 __ Mov(x24, 0x0008000000800000);
1596 __ Mov(x25, 0xff800000fff80000);
1597 __ Mov(x26, 0);
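  // Clz counts leading zero bits; Cls counts leading bits that match the sign
  // bit, excluding the sign bit itself (so Cls of zero is 31 or 63).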
1598 __ Clz(w0, w24);
1599 __ Clz(x1, x24);
1600 __ Clz(w2, w25);
1601 __ Clz(x3, x25);
1602 __ Clz(w4, w26);
1603 __ Clz(x5, x26);
1604 __ Cls(w6, w24);
1605 __ Cls(x7, x24);
1606 __ Cls(w8, w25);
1607 __ Cls(x9, x25);
1608 __ Cls(w10, w26);
1609 __ Cls(x11, x26);
1610 END();
1611
1612 if (CAN_RUN()) {
1613 RUN();
1614
1615 ASSERT_EQUAL_64(8, x0);
1616 ASSERT_EQUAL_64(12, x1);
1617 ASSERT_EQUAL_64(0, x2);
1618 ASSERT_EQUAL_64(0, x3);
1619 ASSERT_EQUAL_64(32, x4);
1620 ASSERT_EQUAL_64(64, x5);
1621 ASSERT_EQUAL_64(7, x6);
1622 ASSERT_EQUAL_64(11, x7);
1623 ASSERT_EQUAL_64(12, x8);
1624 ASSERT_EQUAL_64(8, x9);
1625 ASSERT_EQUAL_64(31, x10);
1626 ASSERT_EQUAL_64(63, x11);
1627 }
1628 }
1629
1630
1631 TEST(pacia_pacib_autia_autib) {
1632 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
1633
1634 START();
1635
1636 Register pointer = x24;
1637 Register modifier = x25;
1638
1639 __ Mov(pointer, 0x0000000012345678);
1640 __ Mov(modifier, 0x477d469dec0b8760);
1641
1642 // Generate PACs using keys A and B.
1643 __ Mov(x0, pointer);
1644 __ Pacia(x0, modifier);
1645
1646 __ Mov(x1, pointer);
1647 __ Pacib(x1, modifier);
1648
1649 // Authenticate the pointers above.
1650 __ Mov(x2, x0);
1651 __ Autia(x2, modifier);
1652
1653 __ Mov(x3, x1);
1654 __ Autib(x3, modifier);
1655
1656 // Attempt to authenticate incorrect pointers.
1657 __ Mov(x4, x1);
1658 __ Autia(x4, modifier);
1659
1660 __ Mov(x5, x0);
1661 __ Autib(x5, modifier);
1662
1663 // Mask out just the PAC code bits.
1664 // TODO: use Simulator::CalculatePACMask in a nice way.
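  // In this test's configuration the PAC lands in bits [54:48] of the
  // pointer, hence the 0x007f000000000000 mask.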
1665 __ And(x0, x0, 0x007f000000000000);
1666 __ And(x1, x1, 0x007f000000000000);
1667
1668 END();
1669
1670 if (CAN_RUN()) {
1671 RUN();
1672
1673 // Check PAC codes have been generated and aren't equal.
1674 // NOTE: with a different ComputePAC implementation, there may be a
1675 // collision.
1676 ASSERT_NOT_EQUAL_64(0, x0);
1677 ASSERT_NOT_EQUAL_64(0, x1);
1678 ASSERT_NOT_EQUAL_64(x0, x1);
1679
1680 // Pointers correctly authenticated.
1681 ASSERT_EQUAL_64(pointer, x2);
1682 ASSERT_EQUAL_64(pointer, x3);
1683
1684 // Pointers corrupted after failing to authenticate.
1685 ASSERT_EQUAL_64(0x0020000012345678, x4);
1686 ASSERT_EQUAL_64(0x0040000012345678, x5);
1687 }
1688 }
1689
1690
1691 TEST(paciza_pacizb_autiza_autizb) {
1692 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
1693
1694 START();
1695
1696 Register pointer = x24;
1697
1698 __ Mov(pointer, 0x0000000012345678);
1699
1700 // Generate PACs using keys A and B.
1701 __ Mov(x0, pointer);
1702 __ Paciza(x0);
1703
1704 __ Mov(x1, pointer);
1705 __ Pacizb(x1);
1706
1707 // Authenticate the pointers above.
1708 __ Mov(x2, x0);
1709 __ Autiza(x2);
1710
1711 __ Mov(x3, x1);
1712 __ Autizb(x3);
1713
1714 // Attempt to authenticate incorrect pointers.
1715 __ Mov(x4, x1);
1716 __ Autiza(x4);
1717
1718 __ Mov(x5, x0);
1719 __ Autizb(x5);
1720
1721 // Mask out just the PAC code bits.
1722 // TODO: use Simulator::CalculatePACMask in a nice way.
1723 __ And(x0, x0, 0x007f000000000000);
1724 __ And(x1, x1, 0x007f000000000000);
1725
1726 END();
1727
1728 if (CAN_RUN()) {
1729 RUN();
1730
1731 // Check PAC codes have been generated and aren't equal.
1732 // NOTE: with a different ComputePAC implementation, there may be a
1733 // collision.
1734 ASSERT_NOT_EQUAL_64(0, x0);
1735 ASSERT_NOT_EQUAL_64(0, x1);
1736 ASSERT_NOT_EQUAL_64(x0, x1);
1737
1738 // Pointers correctly authenticated.
1739 ASSERT_EQUAL_64(pointer, x2);
1740 ASSERT_EQUAL_64(pointer, x3);
1741
1742 // Pointers corrupted after failing to authenticate.
1743 ASSERT_EQUAL_64(0x0020000012345678, x4);
1744 ASSERT_EQUAL_64(0x0040000012345678, x5);
1745 }
1746 }
1747
1748
1749 TEST(pacda_pacdb_autda_autdb) {
1750 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
1751
1752 START();
1753
1754 Register pointer = x24;
1755 Register modifier = x25;
1756
1757 __ Mov(pointer, 0x0000000012345678);
1758 __ Mov(modifier, 0x477d469dec0b8760);
1759
1760 // Generate PACs using keys A and B.
1761 __ Mov(x0, pointer);
1762 __ Pacda(x0, modifier);
1763
1764 __ Mov(x1, pointer);
1765 __ Pacdb(x1, modifier);
1766
1767 // Authenticate the pointers above.
1768 __ Mov(x2, x0);
1769 __ Autda(x2, modifier);
1770
1771 __ Mov(x3, x1);
1772 __ Autdb(x3, modifier);
1773
1774 // Attempt to authenticate incorrect pointers.
1775 __ Mov(x4, x1);
1776 __ Autda(x4, modifier);
1777
1778 __ Mov(x5, x0);
1779 __ Autdb(x5, modifier);
1780
1781 // Mask out just the PAC code bits.
1782 // TODO: use Simulator::CalculatePACMask in a nice way.
1783 __ And(x0, x0, 0x007f000000000000);
1784 __ And(x1, x1, 0x007f000000000000);
1785
1786 END();
1787
1788 if (CAN_RUN()) {
1789 RUN();
1790
1791 // Check PAC codes have been generated and aren't equal.
1792 // NOTE: with a different ComputePAC implementation, there may be a
1793 // collision.
1794 ASSERT_NOT_EQUAL_64(0, x0);
1795 ASSERT_NOT_EQUAL_64(0, x1);
1796 ASSERT_NOT_EQUAL_64(x0, x1);
1797
1798 // Pointers correctly authenticated.
1799 ASSERT_EQUAL_64(pointer, x2);
1800 ASSERT_EQUAL_64(pointer, x3);
1801
1802 // Pointers corrupted after failing to authenticate.
1803 ASSERT_EQUAL_64(0x0020000012345678, x4);
1804 ASSERT_EQUAL_64(0x0040000012345678, x5);
1805 }
1806 }
1807
1808
1809 TEST(pacdza_pacdzb_autdza_autdzb) {
1810 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
1811
1812 START();
1813
1814 Register pointer = x24;
1815
1816 __ Mov(pointer, 0x0000000012345678);
1817
1818 // Generate PACs using keys A and B.
1819 __ Mov(x0, pointer);
1820 __ Pacdza(x0);
1821
1822 __ Mov(x1, pointer);
1823 __ Pacdzb(x1);
1824
1825 // Authenticate the pointers above.
1826 __ Mov(x2, x0);
1827 __ Autdza(x2);
1828
1829 __ Mov(x3, x1);
1830 __ Autdzb(x3);
1831
1832 // Attempt to authenticate incorrect pointers.
1833 __ Mov(x4, x1);
1834 __ Autdza(x4);
1835
1836 __ Mov(x5, x0);
1837 __ Autdzb(x5);
1838
1839 // Mask out just the PAC code bits.
1840 // TODO: use Simulator::CalculatePACMask in a nice way.
1841 __ And(x0, x0, 0x007f000000000000);
1842 __ And(x1, x1, 0x007f000000000000);
1843
1844 END();
1845
1846 if (CAN_RUN()) {
1847 RUN();
1848
1849 // Check PAC codes have been generated and aren't equal.
1850 // NOTE: with a different ComputePAC implementation, there may be a
1851 // collision.
1852 ASSERT_NOT_EQUAL_64(0, x0);
1853 ASSERT_NOT_EQUAL_64(0, x1);
1854 ASSERT_NOT_EQUAL_64(x0, x1);
1855
1856 // Pointers correctly authenticated.
1857 ASSERT_EQUAL_64(pointer, x2);
1858 ASSERT_EQUAL_64(pointer, x3);
1859
1860 // Pointers corrupted after failing to authenticate.
1861 ASSERT_EQUAL_64(0x0020000012345678, x4);
1862 ASSERT_EQUAL_64(0x0040000012345678, x5);
1863 }
1864 }
1865
1866
1867 TEST(pacga_xpaci_xpacd) {
1868 SETUP_WITH_FEATURES(CPUFeatures::kPAuth, CPUFeatures::kPAuthGeneric);
1869
1870 START();
1871
1872 Register pointer = x24;
1873 Register modifier = x25;
1874
1875 __ Mov(pointer, 0x0000000012345678);
1876 __ Mov(modifier, 0x477d469dec0b8760);
1877
1878 // Generate generic PAC.
1879 __ Pacga(x0, pointer, modifier);
1880
1881 // Generate PACs using key A.
1882 __ Mov(x1, pointer);
1883 __ Mov(x2, pointer);
1884 __ Pacia(x1, modifier);
1885 __ Pacda(x2, modifier);
1886
1887 // Strip PACs.
1888 __ Mov(x3, x1);
1889 __ Mov(x4, x2);
1890 __ Xpaci(x3);
1891 __ Xpacd(x4);
1892
1893 // Mask out just the PAC code bits.
1894 // TODO: use Simulator::CalculatePACMask in a nice way.
1895 __ And(x0, x0, 0xffffffff00000000);
1896 __ And(x1, x1, 0x007f000000000000);
1897 __ And(x2, x2, 0x007f000000000000);
1898
1899 END();
1900
1901 if (CAN_RUN()) {
1902 RUN();
1903
1904
1905 // Check PAC codes have been generated and aren't equal.
1906 // NOTE: with a different ComputePAC implementation, there may be a
1907 // collision.
1908 ASSERT_NOT_EQUAL_64(0, x0);
1909
1910 ASSERT_NOT_EQUAL_64(0, x1);
1911 ASSERT_NOT_EQUAL_64(0, x2);
1912 ASSERT_NOT_EQUAL_64(x1, x2);
1913
1914 ASSERT_EQUAL_64(pointer, x3);
1915 ASSERT_EQUAL_64(pointer, x4);
1916 }
1917 }
1918
1919
1920 TEST(label) {
1921 SETUP();
1922
1923 Label label_1, label_2, label_3, label_4;
1924
1925 START();
1926 __ Mov(x0, 0x1);
1927 __ Mov(x1, 0x0);
1928 __ Mov(x22, lr); // Save lr.
1929
1930 __ B(&label_1);
1931 __ B(&label_1);
1932 __ B(&label_1); // Multiple branches to the same label.
1933 __ Mov(x0, 0x0);
1934 __ Bind(&label_2);
1935 __ B(&label_3); // Forward branch.
1936 __ Mov(x0, 0x0);
1937 __ Bind(&label_1);
1938 __ B(&label_2); // Backward branch.
1939 __ Mov(x0, 0x0);
1940 __ Bind(&label_3);
1941 __ Bl(&label_4);
1942 END();
1943
1944 __ Bind(&label_4);
1945 __ Mov(x1, 0x1);
1946 __ Mov(lr, x22);
1947 END();
1948
1949 if (CAN_RUN()) {
1950 RUN();
1951
1952 ASSERT_EQUAL_64(0x1, x0);
1953 ASSERT_EQUAL_64(0x1, x1);
1954 }
1955 }
1956
1957
1958 TEST(label_2) {
1959 SETUP();
1960
1961 Label label_1, label_2, label_3;
1962 Label first_jump_to_3;
1963
1964 START();
1965 __ Mov(x0, 0x0);
1966
1967 __ B(&label_1);
1968 ptrdiff_t offset_2 = masm.GetCursorOffset();
1969 __ Orr(x0, x0, 1 << 1);
1970 __ B(&label_3);
1971 ptrdiff_t offset_1 = masm.GetCursorOffset();
1972 __ Orr(x0, x0, 1 << 0);
1973 __ B(&label_2);
1974 ptrdiff_t offset_3 = masm.GetCursorOffset();
1975 __ Tbz(x0, 2, &first_jump_to_3);
1976 __ Orr(x0, x0, 1 << 3);
1977 __ Bind(&first_jump_to_3);
1978 __ Orr(x0, x0, 1 << 2);
1979 __ Tbz(x0, 3, &label_3);
1980
1981 // Labels 1, 2, and 3 are bound before the current buffer offset. Branches to
1982 // label_1 and label_2 branch respectively forward and backward. Branches to
1983 // label 3 include both forward and backward branches.
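  // Each Orr above sets a distinct bit, so x0 == 0xf at the end confirms that
  // every block was reached via the expected mix of forward and backward
  // branches.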
1984 masm.BindToOffset(&label_1, offset_1);
1985 masm.BindToOffset(&label_2, offset_2);
1986 masm.BindToOffset(&label_3, offset_3);
1987
1988 END();
1989
1990 if (CAN_RUN()) {
1991 RUN();
1992
1993 ASSERT_EQUAL_64(0xf, x0);
1994 }
1995 }
1996
1997
1998 TEST(adr) {
1999 SETUP();
2000
2001 Label label_1, label_2, label_3, label_4;
2002
2003 START();
2004 __ Mov(x0, 0x0); // Set to non-zero to indicate failure.
2005 __ Adr(x1, &label_3); // Set to zero to indicate success.
2006
2007 __ Adr(x2, &label_1); // Multiple forward references to the same label.
2008 __ Adr(x3, &label_1);
2009 __ Adr(x4, &label_1);
2010
2011 __ Bind(&label_2);
2012 __ Eor(x5, x2, Operand(x3)); // Ensure that x2,x3 and x4 are identical.
2013 __ Eor(x6, x2, Operand(x4));
2014 __ Orr(x0, x0, Operand(x5));
2015 __ Orr(x0, x0, Operand(x6));
2016 __ Br(x2); // label_1, label_3
2017
2018 __ Bind(&label_3);
2019 __ Adr(x2, &label_3); // Self-reference (offset 0).
2020 __ Eor(x1, x1, Operand(x2));
2021 __ Adr(x2, &label_4); // Simple forward reference.
2022 __ Br(x2); // label_4
2023
2024 __ Bind(&label_1);
2025 __ Adr(x2, &label_3); // Multiple reverse references to the same label.
2026 __ Adr(x3, &label_3);
2027 __ Adr(x4, &label_3);
2028 __ Adr(x5, &label_2); // Simple reverse reference.
2029 __ Br(x5); // label_2
2030
2031 __ Bind(&label_4);
2032 END();
2033
2034 if (CAN_RUN()) {
2035 RUN();
2036
2037 ASSERT_EQUAL_64(0x0, x0);
2038 ASSERT_EQUAL_64(0x0, x1);
2039 }
2040 }
2041
2042
2043 // Simple adrp tests: check that labels are linked and handled properly.
2044 // This is similar to the adr test, but all the adrp instructions are put on the
2045 // same page so that they return the same value.
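// Adrp computes the 4KB-aligned base address of the page containing its
// target, which is why the expected value below is AlignDown(start, kPageSize).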
2046 TEST(adrp) {
2047 Label start;
2048 Label label_1, label_2, label_3;
2049
2050 SETUP_CUSTOM(2 * kPageSize, PageOffsetDependentCode);
2051 START();
2052
2053 // Waste space until the start of a page.
2054 {
2055 ExactAssemblyScope scope(&masm,
2056 kPageSize,
2057 ExactAssemblyScope::kMaximumSize);
2058 const uintptr_t kPageOffsetMask = kPageSize - 1;
2059 while ((masm.GetCursorAddress<uintptr_t>() & kPageOffsetMask) != 0) {
2060 __ b(&start);
2061 }
2062 __ bind(&start);
2063 }
2064
2065 // Simple forward reference.
2066 __ Adrp(x0, &label_2);
2067
2068 __ Bind(&label_1);
2069
2070 // Multiple forward references to the same label.
2071 __ Adrp(x1, &label_3);
2072 __ Adrp(x2, &label_3);
2073 __ Adrp(x3, &label_3);
2074
2075 __ Bind(&label_2);
2076
2077 // Self-reference (offset 0).
2078 __ Adrp(x4, &label_2);
2079
2080 __ Bind(&label_3);
2081
2082 // Simple reverse reference.
2083 __ Adrp(x5, &label_1);
2084
2085 // Multiple reverse references to the same label.
2086 __ Adrp(x6, &label_2);
2087 __ Adrp(x7, &label_2);
2088 __ Adrp(x8, &label_2);
2089
2090 VIXL_ASSERT(masm.GetSizeOfCodeGeneratedSince(&start) < kPageSize);
2091 END();
2092 if (CAN_RUN()) {
2093 RUN();
2094
2095 uint64_t expected = reinterpret_cast<uint64_t>(
2096 AlignDown(masm.GetLabelAddress<uint64_t*>(&start), kPageSize));
2097 ASSERT_EQUAL_64(expected, x0);
2098 ASSERT_EQUAL_64(expected, x1);
2099 ASSERT_EQUAL_64(expected, x2);
2100 ASSERT_EQUAL_64(expected, x3);
2101 ASSERT_EQUAL_64(expected, x4);
2102 ASSERT_EQUAL_64(expected, x5);
2103 ASSERT_EQUAL_64(expected, x6);
2104 ASSERT_EQUAL_64(expected, x7);
2105 ASSERT_EQUAL_64(expected, x8);
2106 }
2107 }
2108
2109
2110 static void AdrpPageBoundaryHelper(unsigned offset_into_page) {
2111 VIXL_ASSERT(offset_into_page < kPageSize);
2112 VIXL_ASSERT((offset_into_page % kInstructionSize) == 0);
2113
2114 const uintptr_t kPageOffsetMask = kPageSize - 1;
2115
2116 // The test label is always bound on page 0. Adrp instructions are generated
2117 // on pages from kStartPage to kEndPage (inclusive).
2118 const int kStartPage = -16;
2119 const int kEndPage = 16;
2120 const int kMaxCodeSize = (kEndPage - kStartPage + 2) * kPageSize;
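  // kStartPage to kEndPage inclusive gives 33 pages of adrp/ccmp code; the
  // extra page in the '+ 2' presumably leaves room for the initial cmp and the
  // alignment padding emitted before the first full page.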
2121
2122 SETUP_CUSTOM(kMaxCodeSize, PageOffsetDependentCode);
2123 START();
2124
2125 Label test;
2126 Label start;
2127
2128 {
2129 ExactAssemblyScope scope(&masm,
2130 kMaxCodeSize,
2131 ExactAssemblyScope::kMaximumSize);
2132 // Initialize NZCV with `eq` flags.
2133 __ cmp(wzr, wzr);
2134 // Waste space until the start of a page.
2135 while ((masm.GetCursorAddress<uintptr_t>() & kPageOffsetMask) != 0) {
2136 __ b(&start);
2137 }
2138
2139 // The first page.
2140 VIXL_STATIC_ASSERT(kStartPage < 0);
2141 {
2142 ExactAssemblyScope scope_page(&masm, kPageSize);
2143 __ bind(&start);
2144 __ adrp(x0, &test);
2145 __ adrp(x1, &test);
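      // ccmp compares x0 and x1 only while the flags still read 'eq'; any adrp
      // result that differs clears the flags, and ASSERT_EQUAL_NZCV(ZCFlag) at
      // the end of the test detects it.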
2146 for (size_t i = 2; i < (kPageSize / kInstructionSize); i += 2) {
2147 __ ccmp(x0, x1, NoFlag, eq);
2148 __ adrp(x1, &test);
2149 }
2150 }
2151
2152 // Subsequent pages.
2153 VIXL_STATIC_ASSERT(kEndPage >= 0);
2154 for (int page = (kStartPage + 1); page <= kEndPage; page++) {
2155 ExactAssemblyScope scope_page(&masm, kPageSize);
2156 if (page == 0) {
2157 for (size_t i = 0; i < (kPageSize / kInstructionSize);) {
2158 if (i++ == (offset_into_page / kInstructionSize)) __ bind(&test);
2159 __ ccmp(x0, x1, NoFlag, eq);
2160 if (i++ == (offset_into_page / kInstructionSize)) __ bind(&test);
2161 __ adrp(x1, &test);
2162 }
2163 } else {
2164 for (size_t i = 0; i < (kPageSize / kInstructionSize); i += 2) {
2165 __ ccmp(x0, x1, NoFlag, eq);
2166 __ adrp(x1, &test);
2167 }
2168 }
2169 }
2170 }
2171
2172 // Every adrp instruction pointed to the same label (`test`), so they should
2173 // all have produced the same result.
2174
2175 END();
2176 if (CAN_RUN()) {
2177 RUN();
2178
2179 uintptr_t expected =
2180 AlignDown(masm.GetLabelAddress<uintptr_t>(&test), kPageSize);
2181 ASSERT_EQUAL_64(expected, x0);
2182 ASSERT_EQUAL_64(expected, x1);
2183 ASSERT_EQUAL_NZCV(ZCFlag);
2184 }
2185 }
2186
2187
2188 // Test that labels are correctly referenced by adrp across page boundaries.
2189 TEST(adrp_page_boundaries) {
2190 VIXL_STATIC_ASSERT(kPageSize == 4096);
2191 AdrpPageBoundaryHelper(kInstructionSize * 0);
2192 AdrpPageBoundaryHelper(kInstructionSize * 1);
2193 AdrpPageBoundaryHelper(kInstructionSize * 512);
2194 AdrpPageBoundaryHelper(kInstructionSize * 1022);
2195 AdrpPageBoundaryHelper(kInstructionSize * 1023);
2196 }
2197
2198
2199 static void AdrpOffsetHelper(int64_t offset) {
2200 const size_t kPageOffsetMask = kPageSize - 1;
2201 const int kMaxCodeSize = 2 * kPageSize;
2202
2203 SETUP_CUSTOM(kMaxCodeSize, PageOffsetDependentCode);
2204 START();
2205
2206 Label page;
2207
2208 {
2209 ExactAssemblyScope scope(&masm,
2210 kMaxCodeSize,
2211 ExactAssemblyScope::kMaximumSize);
2212 // Initialize NZCV with `eq` flags.
2213 __ cmp(wzr, wzr);
2214 // Waste space until the start of a page.
2215 while ((masm.GetCursorAddress<uintptr_t>() & kPageOffsetMask) != 0) {
2216 __ b(&page);
2217 }
2218 __ bind(&page);
2219
2220 {
2221 ExactAssemblyScope scope_page(&masm, kPageSize);
2222 // Every adrp instruction on this page should return the same value.
2223 __ adrp(x0, offset);
2224 __ adrp(x1, offset);
2225 for (size_t i = 2; i < kPageSize / kInstructionSize; i += 2) {
2226 __ ccmp(x0, x1, NoFlag, eq);
2227 __ adrp(x1, offset);
2228 }
2229 }
2230 }
2231
2232 END();
2233 if (CAN_RUN()) {
2234 RUN();
2235
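    // adrp computes (PC & ~0xfff) + (offset << 12), so the expected value is
    // the page address of `page` plus `offset` pages.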
2236 uintptr_t expected =
2237 masm.GetLabelAddress<uintptr_t>(&page) + (kPageSize * offset);
2238 ASSERT_EQUAL_64(expected, x0);
2239 ASSERT_EQUAL_64(expected, x1);
2240 ASSERT_EQUAL_NZCV(ZCFlag);
2241 }
2242 }
2243
2244
2245 // Check that adrp produces the correct result for a specific offset.
2246 TEST(adrp_offset) {
2247 AdrpOffsetHelper(0);
2248 AdrpOffsetHelper(1);
2249 AdrpOffsetHelper(-1);
2250 AdrpOffsetHelper(4);
2251 AdrpOffsetHelper(-4);
2252 AdrpOffsetHelper(0x000fffff);
2253 AdrpOffsetHelper(-0x000fffff);
2254 AdrpOffsetHelper(-0x00100000);
2255 }
2256
2257
2258 TEST(branch_cond) {
2259 SETUP();
2260
2261 Label done, wrong;
2262
2263 START();
2264 __ Mov(x0, 0x1);
2265 __ Mov(x1, 0x1);
2266 __ Mov(x2, 0x8000000000000000);
2267
2268   // For each 'cmp' instruction below, the conditions tested by the branches to
2269   // 'wrong' must not hold; only the final branch to the 'ok' label is taken.
2270
2271 __ Cmp(x1, 0);
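  // x1 is 1, so the comparison sets C (no borrow) and clears N, Z and V; none
  // of the conditions branched to `wrong` below holds for that flag state.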
2272 __ B(&wrong, eq);
2273 __ B(&wrong, lo);
2274 __ B(&wrong, mi);
2275 __ B(&wrong, vs);
2276 __ B(&wrong, ls);
2277 __ B(&wrong, lt);
2278 __ B(&wrong, le);
2279 Label ok_1;
2280 __ B(&ok_1, ne);
2281 __ Mov(x0, 0x0);
2282 __ Bind(&ok_1);
2283
2284 __ Cmp(x1, 1);
2285 __ B(&wrong, ne);
2286 __ B(&wrong, lo);
2287 __ B(&wrong, mi);
2288 __ B(&wrong, vs);
2289 __ B(&wrong, hi);
2290 __ B(&wrong, lt);
2291 __ B(&wrong, gt);
2292 Label ok_2;
2293 __ B(&ok_2, pl);
2294 __ Mov(x0, 0x0);
2295 __ Bind(&ok_2);
2296
2297 __ Cmp(x1, 2);
2298 __ B(&wrong, eq);
2299 __ B(&wrong, hs);
2300 __ B(&wrong, pl);
2301 __ B(&wrong, vs);
2302 __ B(&wrong, hi);
2303 __ B(&wrong, ge);
2304 __ B(&wrong, gt);
2305 Label ok_3;
2306 __ B(&ok_3, vc);
2307 __ Mov(x0, 0x0);
2308 __ Bind(&ok_3);
2309
2310 __ Cmp(x2, 1);
2311 __ B(&wrong, eq);
2312 __ B(&wrong, lo);
2313 __ B(&wrong, mi);
2314 __ B(&wrong, vc);
2315 __ B(&wrong, ls);
2316 __ B(&wrong, ge);
2317 __ B(&wrong, gt);
2318 Label ok_4;
2319 __ B(&ok_4, le);
2320 __ Mov(x0, 0x0);
2321 __ Bind(&ok_4);
2322
2323 // The MacroAssembler does not allow al as a branch condition.
2324 Label ok_5;
2325 {
2326 ExactAssemblyScope scope(&masm, kInstructionSize);
2327 __ b(&ok_5, al);
2328 }
2329 __ Mov(x0, 0x0);
2330 __ Bind(&ok_5);
2331
2332 // The MacroAssembler does not allow nv as a branch condition.
2333 Label ok_6;
2334 {
2335 ExactAssemblyScope scope(&masm, kInstructionSize);
2336 __ b(&ok_6, nv);
2337 }
2338 __ Mov(x0, 0x0);
2339 __ Bind(&ok_6);
2340
2341 __ B(&done);
2342
2343 __ Bind(&wrong);
2344 __ Mov(x0, 0x0);
2345
2346 __ Bind(&done);
2347 END();
2348
2349 if (CAN_RUN()) {
2350 RUN();
2351
2352 ASSERT_EQUAL_64(0x1, x0);
2353 }
2354 }
2355
2356
2357 TEST(branch_to_reg) {
2358 SETUP();
2359
2360 // Test br.
2361 Label fn1, after_fn1;
2362
2363 START();
2364 __ Mov(x29, lr);
2365
2366 __ Mov(x1, 0);
2367 __ B(&after_fn1);
2368
2369 __ Bind(&fn1);
2370 __ Mov(x0, lr);
2371 __ Mov(x1, 42);
2372 __ Br(x0);
2373
2374 __ Bind(&after_fn1);
2375 __ Bl(&fn1);
2376
2377 // Test blr.
2378 Label fn2, after_fn2, after_bl2;
2379
2380 __ Mov(x2, 0);
2381 __ B(&after_fn2);
2382
2383 __ Bind(&fn2);
2384 __ Mov(x0, lr);
2385 __ Mov(x2, 84);
2386 __ Blr(x0);
2387
2388 __ Bind(&after_fn2);
2389 __ Bl(&fn2);
2390 __ Bind(&after_bl2);
2391 __ Mov(x3, lr);
2392 __ Adr(x4, &after_bl2);
2393 __ Adr(x5, &after_fn2);
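  // x0 captured lr inside fn2, i.e. the address of after_bl2 (set by the Bl),
  // and x3 holds the lr written by the Blr, i.e. the address of after_fn2; the
  // Adr results above provide the reference values for the assertions.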
2394
2395 __ Mov(lr, x29);
2396 END();
2397
2398 if (CAN_RUN()) {
2399 RUN();
2400
2401 ASSERT_EQUAL_64(x4, x0);
2402 ASSERT_EQUAL_64(x5, x3);
2403 ASSERT_EQUAL_64(42, x1);
2404 ASSERT_EQUAL_64(84, x2);
2405 }
2406 }
2407
2408 TEST(branch_to_reg_auth_a) {
2409 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
2410
2411 START();
2412
2413 Label fn1, after_fn1;
2414
2415 __ Mov(x28, 0x477d469dec0b8760);
2416 __ Mov(x29, lr);
2417
2418 __ Mov(x1, 0);
2419 __ B(&after_fn1);
2420
2421 __ Bind(&fn1);
2422 __ Mov(x0, lr);
2423 __ Mov(x1, 42);
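  // Sign the return address with key A, using x28 as the modifier; Braa
  // authenticates with the same key and modifier before branching.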
2424 __ Pacia(x0, x28);
2425 __ Braa(x0, x28);
2426
2427 __ Bind(&after_fn1);
2428 __ Bl(&fn1);
2429
2430 Label fn2, after_fn2, after_bl2;
2431
2432 __ Mov(x2, 0);
2433 __ B(&after_fn2);
2434
2435 __ Bind(&fn2);
2436 __ Mov(x0, lr);
2437 __ Mov(x2, 84);
2438 __ Pacia(x0, x28);
2439 __ Blraa(x0, x28);
2440
2441 __ Bind(&after_fn2);
2442 __ Bl(&fn2);
2443 __ Bind(&after_bl2);
2444 __ Mov(x3, lr);
2445 __ Adr(x4, &after_bl2);
2446 __ Adr(x5, &after_fn2);
2447
2448 __ Xpaci(x0);
2449 __ Mov(lr, x29);
2450 END();
2451
2452 if (CAN_RUN()) {
2453 RUN();
2454
2455 ASSERT_EQUAL_64(x4, x0);
2456 ASSERT_EQUAL_64(x5, x3);
2457 ASSERT_EQUAL_64(42, x1);
2458 ASSERT_EQUAL_64(84, x2);
2459 }
2460 }
2461
2462 TEST(return_to_reg_auth) {
2463 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
2464
2465 START();
2466
2467 Label fn1, after_fn1;
2468
2469 __ Mov(x28, sp);
2470 __ Mov(x29, lr);
2471 __ Mov(sp, 0x477d469dec0b8760);
2472
2473 __ Mov(x0, 0);
2474 __ B(&after_fn1);
2475
2476 __ Bind(&fn1);
2477 __ Mov(x0, 42);
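  // Paciasp signs lr with key A using sp as the modifier; Retaa authenticates
  // with the same key and modifier before returning.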
2478 __ Paciasp();
2479 __ Retaa();
2480
2481 __ Bind(&after_fn1);
2482 __ Bl(&fn1);
2483
2484 Label fn2, after_fn2;
2485
2486 __ Mov(x1, 0);
2487 __ B(&after_fn2);
2488
2489 __ Bind(&fn2);
2490 __ Mov(x1, 84);
2491 __ Pacibsp();
2492 __ Retab();
2493
2494 __ Bind(&after_fn2);
2495 __ Bl(&fn2);
2496
2497 __ Mov(sp, x28);
2498 __ Mov(lr, x29);
2499 END();
2500
2501 if (CAN_RUN()) {
2502 RUN();
2503
2504 ASSERT_EQUAL_64(42, x0);
2505 ASSERT_EQUAL_64(84, x1);
2506 }
2507 }
2508
2509 TEST(return_to_reg_auth_guarded) {
2510 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
2511
2512 START();
2513
2514 Label fn1, after_fn1;
2515
2516 __ Mov(x28, sp);
2517 __ Mov(x29, lr);
2518 __ Mov(sp, 0x477d469dec0b8760);
2519
2520 __ Mov(x0, 0);
2521 __ B(&after_fn1);
2522
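  // Binding with EmitPACIASP emits PACIASP at the label, signing lr and also
  // acting as a BTI-compatible landing pad for the indirect Blr below once the
  // simulator marks pages as guarded.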
2523 __ Bind(&fn1, EmitPACIASP);
2524 __ Mov(x0, 42);
2525 __ Retaa();
2526
2527 __ Bind(&after_fn1);
2528 __ Adr(x2, &fn1);
2529 __ Blr(x2);
2530
2531 Label fn2, after_fn2;
2532
2533 __ Mov(x1, 0);
2534 __ B(&after_fn2);
2535
2536 __ Bind(&fn2, EmitPACIBSP);
2537 __ Mov(x1, 84);
2538 __ Retab();
2539
2540 __ Bind(&after_fn2);
2541 __ Adr(x2, &fn2);
2542 __ Blr(x2);
2543
2544 __ Mov(sp, x28);
2545 __ Mov(lr, x29);
2546 END();
2547
2548 if (CAN_RUN()) {
2549 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
2550 simulator.SetGuardedPages(true);
2551 #else
2552 VIXL_UNIMPLEMENTED();
2553 #endif
2554 RUN();
2555
2556 ASSERT_EQUAL_64(42, x0);
2557 ASSERT_EQUAL_64(84, x1);
2558 }
2559 }
2560
2561 #ifdef VIXL_NEGATIVE_TESTING
2562 TEST(branch_to_reg_auth_fail) {
2563 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
2564
2565 START();
2566
2567 Label fn1, after_fn1;
2568
2569 __ Mov(x29, lr);
2570
2571 __ B(&after_fn1);
2572
2573 __ Bind(&fn1);
2574 __ Mov(x0, lr);
2575 __ Pacizb(x0);
2576 __ Blraaz(x0);
2577
2578 __ Bind(&after_fn1);
2579 // There is a small but not negligible chance (1 in 127 runs) that the PAC
2580 // codes for keys A and B will collide and BLRAAZ won't abort. To mitigate
2581 // this, we simply repeat the test a few more times.
2582 for (unsigned i = 0; i < 32; i++) {
2583 __ Bl(&fn1);
2584 }
2585
2586 __ Mov(lr, x29);
2587 END();
2588
2589 if (CAN_RUN()) {
2590 MUST_FAIL_WITH_MESSAGE(RUN(), "Failed to authenticate pointer.");
2591 }
2592 }
2593 #endif // VIXL_NEGATIVE_TESTING
2594
2595 #ifdef VIXL_NEGATIVE_TESTING
2596 TEST(return_to_reg_auth_fail) {
2597 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
2598
2599 START();
2600
2601 Label fn1, after_fn1;
2602
2603 __ Mov(x28, sp);
2604 __ Mov(x29, lr);
2605 __ Mov(sp, 0x477d469dec0b8760);
2606
2607 __ B(&after_fn1);
2608
2609 __ Bind(&fn1);
2610 __ Paciasp();
2611 __ Retab();
2612
2613 __ Bind(&after_fn1);
2614 // There is a small but not negligible chance (1 in 127 runs) that the PAC
2615 // codes for keys A and B will collide and RETAB won't abort. To mitigate
2616 // this, we simply repeat the test a few more times.
2617 for (unsigned i = 0; i < 32; i++) {
2618 __ Bl(&fn1);
2619 }
2620
2621 __ Mov(sp, x28);
2622 __ Mov(lr, x29);
2623 END();
2624
2625 if (CAN_RUN()) {
2626 MUST_FAIL_WITH_MESSAGE(RUN(), "Failed to authenticate pointer.");
2627 }
2628 }
2629 #endif // VIXL_NEGATIVE_TESTING
2630
2631 TEST(branch_to_reg_auth_a_zero) {
2632 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
2633
2634 START();
2635
2636 Label fn1, after_fn1;
2637
2638 __ Mov(x29, lr);
2639
2640 __ Mov(x1, 0);
2641 __ B(&after_fn1);
2642
2643 __ Bind(&fn1);
2644 __ Mov(x0, lr);
2645 __ Mov(x1, 42);
2646 __ Paciza(x0);
2647 __ Braaz(x0);
2648
2649 __ Bind(&after_fn1);
2650 __ Bl(&fn1);
2651
2652 Label fn2, after_fn2, after_bl2;
2653
2654 __ Mov(x2, 0);
2655 __ B(&after_fn2);
2656
2657 __ Bind(&fn2);
2658 __ Mov(x0, lr);
2659 __ Mov(x2, 84);
2660 __ Paciza(x0);
2661 __ Blraaz(x0);
2662
2663 __ Bind(&after_fn2);
2664 __ Bl(&fn2);
2665 __ Bind(&after_bl2);
2666 __ Mov(x3, lr);
2667 __ Adr(x4, &after_bl2);
2668 __ Adr(x5, &after_fn2);
2669
2670 __ Xpaci(x0);
2671 __ Mov(lr, x29);
2672 END();
2673
2674 if (CAN_RUN()) {
2675 RUN();
2676
2677 ASSERT_EQUAL_64(x4, x0);
2678 ASSERT_EQUAL_64(x5, x3);
2679 ASSERT_EQUAL_64(42, x1);
2680 ASSERT_EQUAL_64(84, x2);
2681 }
2682 }
2683
2684
2685 TEST(compare_branch) {
2686 SETUP();
2687
2688 START();
2689 __ Mov(x0, 0);
2690 __ Mov(x1, 0);
2691 __ Mov(x2, 0);
2692 __ Mov(x3, 0);
2693 __ Mov(x4, 0);
2694 __ Mov(x5, 0);
2695 __ Mov(x16, 0);
2696 __ Mov(x17, 42);
2697
2698 Label zt, zt_end;
2699 __ Cbz(w16, &zt);
2700 __ B(&zt_end);
2701 __ Bind(&zt);
2702 __ Mov(x0, 1);
2703 __ Bind(&zt_end);
2704
2705 Label zf, zf_end;
2706 __ Cbz(x17, &zf);
2707 __ B(&zf_end);
2708 __ Bind(&zf);
2709 __ Mov(x1, 1);
2710 __ Bind(&zf_end);
2711
2712 Label nzt, nzt_end;
2713 __ Cbnz(w17, &nzt);
2714 __ B(&nzt_end);
2715 __ Bind(&nzt);
2716 __ Mov(x2, 1);
2717 __ Bind(&nzt_end);
2718
2719 Label nzf, nzf_end;
2720 __ Cbnz(x16, &nzf);
2721 __ B(&nzf_end);
2722 __ Bind(&nzf);
2723 __ Mov(x3, 1);
2724 __ Bind(&nzf_end);
2725
2726 __ Mov(x18, 0xffffffff00000000);
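  // The W-register forms below only test the low 32 bits of x18, which are
  // zero here.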
2727
2728 Label a, a_end;
2729 __ Cbz(w18, &a);
2730 __ B(&a_end);
2731 __ Bind(&a);
2732 __ Mov(x4, 1);
2733 __ Bind(&a_end);
2734
2735 Label b, b_end;
2736 __ Cbnz(w18, &b);
2737 __ B(&b_end);
2738 __ Bind(&b);
2739 __ Mov(x5, 1);
2740 __ Bind(&b_end);
2741
2742 END();
2743
2744 if (CAN_RUN()) {
2745 RUN();
2746
2747 ASSERT_EQUAL_64(1, x0);
2748 ASSERT_EQUAL_64(0, x1);
2749 ASSERT_EQUAL_64(1, x2);
2750 ASSERT_EQUAL_64(0, x3);
2751 ASSERT_EQUAL_64(1, x4);
2752 ASSERT_EQUAL_64(0, x5);
2753 }
2754 }
2755
2756
2757 TEST(test_branch) {
2758 SETUP();
2759
2760 START();
2761 __ Mov(x0, 0);
2762 __ Mov(x1, 0);
2763 __ Mov(x2, 0);
2764 __ Mov(x3, 0);
2765 __ Mov(x16, 0xaaaaaaaaaaaaaaaa);
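  // In 0xaaaa...aa the odd-numbered bits are set and the even-numbered bits are
  // clear, so Tbz on bit 0 and Tbnz on bit 61 are taken, while the Tbz on bit
  // 63 and the Tbnz on bit 2 fall through.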
2766
2767 Label bz, bz_end;
2768 __ Tbz(w16, 0, &bz);
2769 __ B(&bz_end);
2770 __ Bind(&bz);
2771 __ Mov(x0, 1);
2772 __ Bind(&bz_end);
2773
2774 Label bo, bo_end;
2775 __ Tbz(x16, 63, &bo);
2776 __ B(&bo_end);
2777 __ Bind(&bo);
2778 __ Mov(x1, 1);
2779 __ Bind(&bo_end);
2780
2781 Label nbz, nbz_end;
2782 __ Tbnz(x16, 61, &nbz);
2783 __ B(&nbz_end);
2784 __ Bind(&nbz);
2785 __ Mov(x2, 1);
2786 __ Bind(&nbz_end);
2787
2788 Label nbo, nbo_end;
2789 __ Tbnz(w16, 2, &nbo);
2790 __ B(&nbo_end);
2791 __ Bind(&nbo);
2792 __ Mov(x3, 1);
2793 __ Bind(&nbo_end);
2794 END();
2795
2796 if (CAN_RUN()) {
2797 RUN();
2798
2799 ASSERT_EQUAL_64(1, x0);
2800 ASSERT_EQUAL_64(0, x1);
2801 ASSERT_EQUAL_64(1, x2);
2802 ASSERT_EQUAL_64(0, x3);
2803 }
2804 }
2805
2806
2807 TEST(branch_type) {
2808 SETUP();
2809
2810 Label fail, done;
2811
2812 START();
2813 __ Mov(x0, 0x0);
2814 __ Mov(x10, 0x7);
2815 __ Mov(x11, 0x0);
2816
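  // The MacroAssembler's extended B() overloads map reg_zero / reg_not_zero to
  // Cbz / Cbnz and reg_bit_clear / reg_bit_set to Tbz / Tbnz.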
2817   // Test non-taken branches.
2818 __ Cmp(x10, 0x7);
2819 __ B(&fail, ne);
2820 __ B(&fail, never);
2821 __ B(&fail, reg_zero, x10);
2822 __ B(&fail, reg_not_zero, x11);
2823 __ B(&fail, reg_bit_clear, x10, 0);
2824 __ B(&fail, reg_bit_set, x10, 3);
2825
2826 // Test taken branches.
2827 Label l1, l2, l3, l4, l5;
2828 __ Cmp(x10, 0x7);
2829 __ B(&l1, eq);
2830 __ B(&fail);
2831 __ Bind(&l1);
2832 __ B(&l2, always);
2833 __ B(&fail);
2834 __ Bind(&l2);
2835 __ B(&l3, reg_not_zero, x10);
2836 __ B(&fail);
2837 __ Bind(&l3);
2838 __ B(&l4, reg_bit_clear, x10, 15);
2839 __ B(&fail);
2840 __ Bind(&l4);
2841 __ B(&l5, reg_bit_set, x10, 1);
2842 __ B(&fail);
2843 __ Bind(&l5);
2844
2845 __ B(&done);
2846
2847 __ Bind(&fail);
2848 __ Mov(x0, 0x1);
2849
2850 __ Bind(&done);
2851
2852 END();
2853
2854 if (CAN_RUN()) {
2855 RUN();
2856
2857 ASSERT_EQUAL_64(0x0, x0);
2858 }
2859 }
2860
2861
2862 TEST(ldr_str_offset) {
2863 SETUP();
2864
2865 uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
2866 uint64_t dst[5] = {0, 0, 0, 0, 0};
2867 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2868 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2869
2870 START();
2871 __ Mov(x17, src_base);
2872 __ Mov(x18, dst_base);
2873 __ Ldr(w0, MemOperand(x17));
2874 __ Str(w0, MemOperand(x18));
2875 __ Ldr(w1, MemOperand(x17, 4));
2876 __ Str(w1, MemOperand(x18, 12));
2877 __ Ldr(x2, MemOperand(x17, 8));
2878 __ Str(x2, MemOperand(x18, 16));
2879 __ Ldrb(w3, MemOperand(x17, 1));
2880 __ Strb(w3, MemOperand(x18, 25));
2881 __ Ldrh(w4, MemOperand(x17, 2));
2882 __ Strh(w4, MemOperand(x18, 33));
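  // The byte and half-word stores use unaligned offsets: offset 25 lands on
  // byte 1 of dst[3] and offset 33 on bytes 1-2 of dst[4], hence the shifted
  // expected values below.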
2883 END();
2884
2885 if (CAN_RUN()) {
2886 RUN();
2887
2888 ASSERT_EQUAL_64(0x76543210, x0);
2889 ASSERT_EQUAL_64(0x76543210, dst[0]);
2890 ASSERT_EQUAL_64(0xfedcba98, x1);
2891 ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
2892 ASSERT_EQUAL_64(0x0123456789abcdef, x2);
2893 ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
2894 ASSERT_EQUAL_64(0x32, x3);
2895 ASSERT_EQUAL_64(0x3200, dst[3]);
2896 ASSERT_EQUAL_64(0x7654, x4);
2897 ASSERT_EQUAL_64(0x765400, dst[4]);
2898 ASSERT_EQUAL_64(src_base, x17);
2899 ASSERT_EQUAL_64(dst_base, x18);
2900 }
2901 }
2902
2903
2904 TEST(ldr_str_wide) {
2905 SETUP();
2906
2907 uint32_t src[8192];
2908 uint32_t dst[8192];
2909 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2910 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2911 memset(src, 0xaa, 8192 * sizeof(src[0]));
2912 memset(dst, 0xaa, 8192 * sizeof(dst[0]));
2913 src[0] = 0;
2914 src[6144] = 6144;
2915 src[8191] = 8191;
2916
2917 START();
2918 __ Mov(x22, src_base);
2919 __ Mov(x23, dst_base);
2920 __ Mov(x24, src_base);
2921 __ Mov(x25, dst_base);
2922 __ Mov(x26, src_base);
2923 __ Mov(x27, dst_base);
2924
2925 __ Ldr(w0, MemOperand(x22, 8191 * sizeof(src[0])));
2926 __ Str(w0, MemOperand(x23, 8191 * sizeof(dst[0])));
2927 __ Ldr(w1, MemOperand(x24, 4096 * sizeof(src[0]), PostIndex));
2928 __ Str(w1, MemOperand(x25, 4096 * sizeof(dst[0]), PostIndex));
2929 __ Ldr(w2, MemOperand(x26, 6144 * sizeof(src[0]), PreIndex));
2930 __ Str(w2, MemOperand(x27, 6144 * sizeof(dst[0]), PreIndex));
2931 END();
2932
2933 if (CAN_RUN()) {
2934 RUN();
2935
2936 ASSERT_EQUAL_32(8191, w0);
2937 ASSERT_EQUAL_32(8191, dst[8191]);
2938 ASSERT_EQUAL_64(src_base, x22);
2939 ASSERT_EQUAL_64(dst_base, x23);
2940 ASSERT_EQUAL_32(0, w1);
2941 ASSERT_EQUAL_32(0, dst[0]);
2942 ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
2943 ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
2944 ASSERT_EQUAL_32(6144, w2);
2945 ASSERT_EQUAL_32(6144, dst[6144]);
2946 ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
2947 ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
2948 }
2949 }
2950
2951
2952 TEST(ldr_str_preindex) {
2953 SETUP();
2954
2955 uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
2956 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
2957 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
2958 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
2959
2960 START();
2961 __ Mov(x17, src_base);
2962 __ Mov(x18, dst_base);
2963 __ Mov(x19, src_base);
2964 __ Mov(x20, dst_base);
2965 __ Mov(x21, src_base + 16);
2966 __ Mov(x22, dst_base + 40);
2967 __ Mov(x23, src_base);
2968 __ Mov(x24, dst_base);
2969 __ Mov(x25, src_base);
2970 __ Mov(x26, dst_base);
2971 __ Ldr(w0, MemOperand(x17, 4, PreIndex));
2972 __ Str(w0, MemOperand(x18, 12, PreIndex));
2973 __ Ldr(x1, MemOperand(x19, 8, PreIndex));
2974 __ Str(x1, MemOperand(x20, 16, PreIndex));
2975 __ Ldr(w2, MemOperand(x21, -4, PreIndex));
2976 __ Str(w2, MemOperand(x22, -4, PreIndex));
2977 __ Ldrb(w3, MemOperand(x23, 1, PreIndex));
2978 __ Strb(w3, MemOperand(x24, 25, PreIndex));
2979 __ Ldrh(w4, MemOperand(x25, 3, PreIndex));
2980 __ Strh(w4, MemOperand(x26, 41, PreIndex));
2981 END();
2982
2983 if (CAN_RUN()) {
2984 RUN();
2985
2986 ASSERT_EQUAL_64(0xfedcba98, x0);
2987 ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
2988 ASSERT_EQUAL_64(0x0123456789abcdef, x1);
2989 ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
2990 ASSERT_EQUAL_64(0x01234567, x2);
2991 ASSERT_EQUAL_64(0x0123456700000000, dst[4]);
2992 ASSERT_EQUAL_64(0x32, x3);
2993 ASSERT_EQUAL_64(0x3200, dst[3]);
2994 ASSERT_EQUAL_64(0x9876, x4);
2995 ASSERT_EQUAL_64(0x987600, dst[5]);
2996 ASSERT_EQUAL_64(src_base + 4, x17);
2997 ASSERT_EQUAL_64(dst_base + 12, x18);
2998 ASSERT_EQUAL_64(src_base + 8, x19);
2999 ASSERT_EQUAL_64(dst_base + 16, x20);
3000 ASSERT_EQUAL_64(src_base + 12, x21);
3001 ASSERT_EQUAL_64(dst_base + 36, x22);
3002 ASSERT_EQUAL_64(src_base + 1, x23);
3003 ASSERT_EQUAL_64(dst_base + 25, x24);
3004 ASSERT_EQUAL_64(src_base + 3, x25);
3005 ASSERT_EQUAL_64(dst_base + 41, x26);
3006 }
3007 }
3008
3009
3010 TEST(ldr_str_postindex) {
3011 SETUP();
3012
3013 uint64_t src[2] = {0xfedcba9876543210, 0x0123456789abcdef};
3014 uint64_t dst[6] = {0, 0, 0, 0, 0, 0};
3015 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3016 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3017
3018 START();
3019 __ Mov(x17, src_base + 4);
3020 __ Mov(x18, dst_base + 12);
3021 __ Mov(x19, src_base + 8);
3022 __ Mov(x20, dst_base + 16);
3023 __ Mov(x21, src_base + 8);
3024 __ Mov(x22, dst_base + 32);
3025 __ Mov(x23, src_base + 1);
3026 __ Mov(x24, dst_base + 25);
3027 __ Mov(x25, src_base + 3);
3028 __ Mov(x26, dst_base + 41);
3029 __ Ldr(w0, MemOperand(x17, 4, PostIndex));
3030 __ Str(w0, MemOperand(x18, 12, PostIndex));
3031 __ Ldr(x1, MemOperand(x19, 8, PostIndex));
3032 __ Str(x1, MemOperand(x20, 16, PostIndex));
3033 __ Ldr(x2, MemOperand(x21, -8, PostIndex));
3034 __ Str(x2, MemOperand(x22, -32, PostIndex));
3035 __ Ldrb(w3, MemOperand(x23, 1, PostIndex));
3036 __ Strb(w3, MemOperand(x24, 5, PostIndex));
3037 __ Ldrh(w4, MemOperand(x25, -3, PostIndex));
3038 __ Strh(w4, MemOperand(x26, -41, PostIndex));
3039 END();
3040
3041 if (CAN_RUN()) {
3042 RUN();
3043
3044 ASSERT_EQUAL_64(0xfedcba98, x0);
3045 ASSERT_EQUAL_64(0xfedcba9800000000, dst[1]);
3046 ASSERT_EQUAL_64(0x0123456789abcdef, x1);
3047 ASSERT_EQUAL_64(0x0123456789abcdef, dst[2]);
3048 ASSERT_EQUAL_64(0x0123456789abcdef, x2);
3049 ASSERT_EQUAL_64(0x0123456789abcdef, dst[4]);
3050 ASSERT_EQUAL_64(0x32, x3);
3051 ASSERT_EQUAL_64(0x3200, dst[3]);
3052 ASSERT_EQUAL_64(0x9876, x4);
3053 ASSERT_EQUAL_64(0x987600, dst[5]);
3054 ASSERT_EQUAL_64(src_base + 8, x17);
3055 ASSERT_EQUAL_64(dst_base + 24, x18);
3056 ASSERT_EQUAL_64(src_base + 16, x19);
3057 ASSERT_EQUAL_64(dst_base + 32, x20);
3058 ASSERT_EQUAL_64(src_base, x21);
3059 ASSERT_EQUAL_64(dst_base, x22);
3060 ASSERT_EQUAL_64(src_base + 2, x23);
3061 ASSERT_EQUAL_64(dst_base + 30, x24);
3062 ASSERT_EQUAL_64(src_base, x25);
3063 ASSERT_EQUAL_64(dst_base, x26);
3064 }
3065 }
3066
3067
3068 TEST(ldr_str_largeindex) {
3069 SETUP();
3070
3071 // This value won't fit in the immediate offset field of ldr/str instructions.
3072 int largeoffset = 0xabcdef;
3073
3074 int64_t data[3] = {0x1122334455667788, 0, 0};
3075 uint64_t base_addr = reinterpret_cast<uintptr_t>(data);
3076 uint64_t drifted_addr = base_addr - largeoffset;
3077
3078   // This test checks that we can use large immediate offsets when
3079   // using the PreIndex or PostIndex addressing modes of the MacroAssembler
3080 // Ldr/Str instructions.
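  // Such offsets fit neither the scaled 12-bit unsigned nor the 9-bit signed
  // immediate fields, so the MacroAssembler has to synthesize the address
  // update with extra instructions while keeping the write-back semantics.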
3081
3082 START();
3083 __ Mov(x19, drifted_addr);
3084 __ Ldr(x0, MemOperand(x19, largeoffset, PreIndex));
3085
3086 __ Mov(x20, base_addr);
3087 __ Ldr(x1, MemOperand(x20, largeoffset, PostIndex));
3088
3089 __ Mov(x21, drifted_addr);
3090 __ Str(x0, MemOperand(x21, largeoffset + 8, PreIndex));
3091
3092 __ Mov(x22, base_addr + 16);
3093 __ Str(x0, MemOperand(x22, largeoffset, PostIndex));
3094 END();
3095
3096 if (CAN_RUN()) {
3097 RUN();
3098
3099 ASSERT_EQUAL_64(0x1122334455667788, data[0]);
3100 ASSERT_EQUAL_64(0x1122334455667788, data[1]);
3101 ASSERT_EQUAL_64(0x1122334455667788, data[2]);
3102 ASSERT_EQUAL_64(0x1122334455667788, x0);
3103 ASSERT_EQUAL_64(0x1122334455667788, x1);
3104
3105 ASSERT_EQUAL_64(base_addr, x19);
3106 ASSERT_EQUAL_64(base_addr + largeoffset, x20);
3107 ASSERT_EQUAL_64(base_addr + 8, x21);
3108 ASSERT_EQUAL_64(base_addr + 16 + largeoffset, x22);
3109 }
3110 }
3111
3112
3113 TEST(load_signed) {
3114 SETUP();
3115
3116 uint32_t src[2] = {0x80008080, 0x7fff7f7f};
3117 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3118
3119 START();
3120 __ Mov(x24, src_base);
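  // Ldrsb/Ldrsh/Ldrsw sign-extend the loaded value to the width of the
  // destination register, so 0x80 becomes 0xffffff80 in a W register and
  // 0xffffffffffffff80 in an X register.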
3121 __ Ldrsb(w0, MemOperand(x24));
3122 __ Ldrsb(w1, MemOperand(x24, 4));
3123 __ Ldrsh(w2, MemOperand(x24));
3124 __ Ldrsh(w3, MemOperand(x24, 4));
3125 __ Ldrsb(x4, MemOperand(x24));
3126 __ Ldrsb(x5, MemOperand(x24, 4));
3127 __ Ldrsh(x6, MemOperand(x24));
3128 __ Ldrsh(x7, MemOperand(x24, 4));
3129 __ Ldrsw(x8, MemOperand(x24));
3130 __ Ldrsw(x9, MemOperand(x24, 4));
3131 END();
3132
3133 if (CAN_RUN()) {
3134 RUN();
3135
3136 ASSERT_EQUAL_64(0xffffff80, x0);
3137 ASSERT_EQUAL_64(0x0000007f, x1);
3138 ASSERT_EQUAL_64(0xffff8080, x2);
3139 ASSERT_EQUAL_64(0x00007f7f, x3);
3140 ASSERT_EQUAL_64(0xffffffffffffff80, x4);
3141 ASSERT_EQUAL_64(0x000000000000007f, x5);
3142 ASSERT_EQUAL_64(0xffffffffffff8080, x6);
3143 ASSERT_EQUAL_64(0x0000000000007f7f, x7);
3144 ASSERT_EQUAL_64(0xffffffff80008080, x8);
3145 ASSERT_EQUAL_64(0x000000007fff7f7f, x9);
3146 }
3147 }
3148
3149
3150 TEST(load_store_regoffset) {
3151 SETUP();
3152
3153 uint32_t src[3] = {1, 2, 3};
3154 uint32_t dst[4] = {0, 0, 0, 0};
3155 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3156 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3157
3158 START();
3159 __ Mov(x16, src_base);
3160 __ Mov(x17, dst_base);
3161 __ Mov(x18, src_base + 3 * sizeof(src[0]));
3162 __ Mov(x19, dst_base + 3 * sizeof(dst[0]));
3163 __ Mov(x20, dst_base + 4 * sizeof(dst[0]));
3164 __ Mov(x24, 0);
3165 __ Mov(x25, 4);
3166 __ Mov(x26, -4);
3167 __ Mov(x27, 0xfffffffc); // 32-bit -4.
3168 __ Mov(x28, 0xfffffffe); // 32-bit -2.
3169 __ Mov(x29, 0xffffffff); // 32-bit -1.
3170
3171 __ Ldr(w0, MemOperand(x16, x24));
3172 __ Ldr(x1, MemOperand(x16, x25));
3173 __ Ldr(w2, MemOperand(x18, x26));
3174 __ Ldr(w3, MemOperand(x18, x27, SXTW));
3175 __ Ldr(w4, MemOperand(x18, x28, SXTW, 2));
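  // With SXTW the 32-bit offset registers are sign-extended, and the optional
  // shift scales them by the access size: x28 (-2) shifted by 2 gives -8, so
  // w4 loads src[1].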
3176 __ Str(w0, MemOperand(x17, x24));
3177 __ Str(x1, MemOperand(x17, x25));
3178 __ Str(w2, MemOperand(x20, x29, SXTW, 2));
3179 END();
3180
3181 if (CAN_RUN()) {
3182 RUN();
3183
3184 ASSERT_EQUAL_64(1, x0);
3185 ASSERT_EQUAL_64(0x0000000300000002, x1);
3186 ASSERT_EQUAL_64(3, x2);
3187 ASSERT_EQUAL_64(3, x3);
3188 ASSERT_EQUAL_64(2, x4);
3189 ASSERT_EQUAL_32(1, dst[0]);
3190 ASSERT_EQUAL_32(2, dst[1]);
3191 ASSERT_EQUAL_32(3, dst[2]);
3192 ASSERT_EQUAL_32(3, dst[3]);
3193 }
3194 }
3195
3196
3197 TEST(load_pauth) {
3198 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
3199
3200 uint64_t src[4] = {1, 2, 3, 4};
3201 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3202
3203 START();
3204 __ Mov(x16, src_base);
3205 __ Mov(x17, src_base);
3206 __ Mov(x18, src_base + 4 * sizeof(src[0]));
3207 __ Mov(x19, src_base + 4 * sizeof(src[0]));
3208
3209   // Add PAC codes to the addresses.
3210 __ Pacdza(x16);
3211 __ Pacdzb(x17);
3212 __ Pacdza(x18);
3213 __ Pacdzb(x19);
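  // Ldraa and Ldrab authenticate the base register with data key A or B (zero
  // modifier) before forming the address, so the PACs added above must be
  // valid.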
3214
3215 __ Ldraa(x0, MemOperand(x16));
3216 __ Ldraa(x1, MemOperand(x16, sizeof(src[0])));
3217 __ Ldraa(x2, MemOperand(x16, 2 * sizeof(src[0]), PreIndex));
3218 __ Ldraa(x3, MemOperand(x18, -sizeof(src[0])));
3219 __ Ldrab(x4, MemOperand(x17));
3220 __ Ldrab(x5, MemOperand(x17, sizeof(src[0])));
3221 __ Ldrab(x6, MemOperand(x17, 2 * sizeof(src[0]), PreIndex));
3222 __ Ldrab(x7, MemOperand(x19, -sizeof(src[0])));
3223 END();
3224
3225 if (CAN_RUN()) {
3226 RUN();
3227
3228 ASSERT_EQUAL_64(1, x0);
3229 ASSERT_EQUAL_64(2, x1);
3230 ASSERT_EQUAL_64(3, x2);
3231 ASSERT_EQUAL_64(4, x3);
3232 ASSERT_EQUAL_64(1, x4);
3233 ASSERT_EQUAL_64(2, x5);
3234 ASSERT_EQUAL_64(3, x6);
3235 ASSERT_EQUAL_64(4, x7);
3236 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
3237 ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x17);
3238 }
3239 }
3240
3241
3242 #ifdef VIXL_NEGATIVE_TESTING
3243 TEST(load_pauth_negative_test) {
3244 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
3245
3246 uint64_t src[4] = {1, 2, 3, 4};
3247 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3248
3249 START();
3250 __ Mov(x16, src_base);
3251
3252 // There is a small but not negligible chance (1 in 127 runs) that the PAC
3253 // codes for keys A and B will collide and LDRAB won't abort. To mitigate
3254 // this, we simply repeat the test a few more times.
3255 for (unsigned i = 0; i < 32; i++) {
3256 __ Add(x17, x16, i);
3257 __ Pacdza(x17);
3258 __ Ldrab(x0, MemOperand(x17));
3259 }
3260 END();
3261
3262 if (CAN_RUN()) {
3263 MUST_FAIL_WITH_MESSAGE(RUN(), "Failed to authenticate pointer.");
3264 }
3265 }
3266 #endif // VIXL_NEGATIVE_TESTING
3267
3268
3269 TEST(ldp_stp_offset) {
3270 SETUP();
3271
3272 uint64_t src[3] = {0x0011223344556677,
3273 0x8899aabbccddeeff,
3274 0xffeeddccbbaa9988};
3275 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
3276 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3277 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3278
3279 START();
3280 __ Mov(x16, src_base);
3281 __ Mov(x17, dst_base);
3282 __ Mov(x18, src_base + 24);
3283 __ Mov(x19, dst_base + 56);
3284 __ Ldp(w0, w1, MemOperand(x16));
3285 __ Ldp(w2, w3, MemOperand(x16, 4));
3286 __ Ldp(x4, x5, MemOperand(x16, 8));
3287 __ Ldp(w6, w7, MemOperand(x18, -12));
3288 __ Ldp(x8, x9, MemOperand(x18, -16));
3289 __ Stp(w0, w1, MemOperand(x17));
3290 __ Stp(w2, w3, MemOperand(x17, 8));
3291 __ Stp(x4, x5, MemOperand(x17, 16));
3292 __ Stp(w6, w7, MemOperand(x19, -24));
3293 __ Stp(x8, x9, MemOperand(x19, -16));
3294 END();
3295
3296 if (CAN_RUN()) {
3297 RUN();
3298
3299 ASSERT_EQUAL_64(0x44556677, x0);
3300 ASSERT_EQUAL_64(0x00112233, x1);
3301 ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
3302 ASSERT_EQUAL_64(0x00112233, x2);
3303 ASSERT_EQUAL_64(0xccddeeff, x3);
3304 ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
3305 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
3306 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
3307 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
3308 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
3309 ASSERT_EQUAL_64(0x8899aabb, x6);
3310 ASSERT_EQUAL_64(0xbbaa9988, x7);
3311 ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
3312 ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
3313 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
3314 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
3315 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
3316 ASSERT_EQUAL_64(src_base, x16);
3317 ASSERT_EQUAL_64(dst_base, x17);
3318 ASSERT_EQUAL_64(src_base + 24, x18);
3319 ASSERT_EQUAL_64(dst_base + 56, x19);
3320 }
3321 }
3322
3323
3324 TEST(ldp_stp_offset_wide) {
3325 SETUP();
3326
3327 uint64_t src[3] = {0x0011223344556677,
3328 0x8899aabbccddeeff,
3329 0xffeeddccbbaa9988};
3330 uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
3331 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3332 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3333 // Move base too far from the array to force multiple instructions
3334 // to be emitted.
3335 const int64_t base_offset = 1024;
3336
3337 START();
3338 __ Mov(x20, src_base - base_offset);
3339 __ Mov(x21, dst_base - base_offset);
3340 __ Mov(x18, src_base + base_offset + 24);
3341 __ Mov(x19, dst_base + base_offset + 56);
3342 __ Ldp(w0, w1, MemOperand(x20, base_offset));
3343 __ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
3344 __ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
3345 __ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
3346 __ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
3347 __ Stp(w0, w1, MemOperand(x21, base_offset));
3348 __ Stp(w2, w3, MemOperand(x21, base_offset + 8));
3349 __ Stp(x4, x5, MemOperand(x21, base_offset + 16));
3350 __ Stp(w6, w7, MemOperand(x19, -24 - base_offset));
3351 __ Stp(x8, x9, MemOperand(x19, -16 - base_offset));
3352 END();
3353
3354 if (CAN_RUN()) {
3355 RUN();
3356
3357 ASSERT_EQUAL_64(0x44556677, x0);
3358 ASSERT_EQUAL_64(0x00112233, x1);
3359 ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
3360 ASSERT_EQUAL_64(0x00112233, x2);
3361 ASSERT_EQUAL_64(0xccddeeff, x3);
3362 ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
3363 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
3364 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
3365 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
3366 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
3367 ASSERT_EQUAL_64(0x8899aabb, x6);
3368 ASSERT_EQUAL_64(0xbbaa9988, x7);
3369 ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
3370 ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
3371 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
3372 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
3373 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
3374 ASSERT_EQUAL_64(src_base - base_offset, x20);
3375 ASSERT_EQUAL_64(dst_base - base_offset, x21);
3376 ASSERT_EQUAL_64(src_base + base_offset + 24, x18);
3377 ASSERT_EQUAL_64(dst_base + base_offset + 56, x19);
3378 }
3379 }
3380
3381
3382 TEST(ldnp_stnp_offset) {
3383 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
3384
3385 uint64_t src[4] = {0x0011223344556677,
3386 0x8899aabbccddeeff,
3387 0xffeeddccbbaa9988,
3388 0x7766554433221100};
3389 uint64_t dst[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
3390 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3391 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3392
3393 START();
3394 __ Mov(x16, src_base);
3395 __ Mov(x17, dst_base);
3396 __ Mov(x18, src_base + 24);
3397 __ Mov(x19, dst_base + 64);
3398 __ Mov(x20, src_base + 32);
3399
3400   // Ensure the address set-up has happened before executing non-temporal ops.
3401 __ Dmb(InnerShareable, BarrierAll);
3402
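  // Ldnp/Stnp behave like Ldp/Stp but carry a non-temporal hint, telling the
  // CPU that the data is unlikely to be reused soon.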
3403 __ Ldnp(w0, w1, MemOperand(x16));
3404 __ Ldnp(w2, w3, MemOperand(x16, 4));
3405 __ Ldnp(x4, x5, MemOperand(x16, 8));
3406 __ Ldnp(w6, w7, MemOperand(x18, -12));
3407 __ Ldnp(x8, x9, MemOperand(x18, -16));
3408 __ Ldnp(q16, q17, MemOperand(x16));
3409 __ Ldnp(q19, q18, MemOperand(x20, -32));
3410 __ Stnp(w0, w1, MemOperand(x17));
3411 __ Stnp(w2, w3, MemOperand(x17, 8));
3412 __ Stnp(x4, x5, MemOperand(x17, 16));
3413 __ Stnp(w6, w7, MemOperand(x19, -32));
3414 __ Stnp(x8, x9, MemOperand(x19, -24));
3415 __ Stnp(q17, q16, MemOperand(x19));
3416 __ Stnp(q18, q19, MemOperand(x19, 32));
3417 END();
3418
3419 if (CAN_RUN()) {
3420 RUN();
3421
3422 ASSERT_EQUAL_64(0x44556677, x0);
3423 ASSERT_EQUAL_64(0x00112233, x1);
3424 ASSERT_EQUAL_64(0x0011223344556677, dst[0]);
3425 ASSERT_EQUAL_64(0x00112233, x2);
3426 ASSERT_EQUAL_64(0xccddeeff, x3);
3427 ASSERT_EQUAL_64(0xccddeeff00112233, dst[1]);
3428 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
3429 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[2]);
3430 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
3431 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[3]);
3432 ASSERT_EQUAL_64(0x8899aabb, x6);
3433 ASSERT_EQUAL_64(0xbbaa9988, x7);
3434 ASSERT_EQUAL_64(0xbbaa99888899aabb, dst[4]);
3435 ASSERT_EQUAL_64(0x8899aabbccddeeff, x8);
3436 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[5]);
3437 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x9);
3438 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[6]);
3439 ASSERT_EQUAL_128(0x8899aabbccddeeff, 0x0011223344556677, q16);
3440 ASSERT_EQUAL_128(0x7766554433221100, 0xffeeddccbbaa9988, q17);
3441 ASSERT_EQUAL_128(0x7766554433221100, 0xffeeddccbbaa9988, q18);
3442 ASSERT_EQUAL_128(0x8899aabbccddeeff, 0x0011223344556677, q19);
3443 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[8]);
3444 ASSERT_EQUAL_64(0x7766554433221100, dst[9]);
3445 ASSERT_EQUAL_64(0x0011223344556677, dst[10]);
3446 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[11]);
3447 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[12]);
3448 ASSERT_EQUAL_64(0x7766554433221100, dst[13]);
3449 ASSERT_EQUAL_64(0x0011223344556677, dst[14]);
3450 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[15]);
3451 ASSERT_EQUAL_64(src_base, x16);
3452 ASSERT_EQUAL_64(dst_base, x17);
3453 ASSERT_EQUAL_64(src_base + 24, x18);
3454 ASSERT_EQUAL_64(dst_base + 64, x19);
3455 ASSERT_EQUAL_64(src_base + 32, x20);
3456 }
3457 }
3458
3459 TEST(ldp_stp_preindex) {
3460 SETUP();
3461
3462 uint64_t src[3] = {0x0011223344556677,
3463 0x8899aabbccddeeff,
3464 0xffeeddccbbaa9988};
3465 uint64_t dst[5] = {0, 0, 0, 0, 0};
3466 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3467 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3468
3469 START();
3470 __ Mov(x16, src_base);
3471 __ Mov(x17, dst_base);
3472 __ Mov(x18, dst_base + 16);
3473 __ Ldp(w0, w1, MemOperand(x16, 4, PreIndex));
3474 __ Mov(x19, x16);
3475 __ Ldp(w2, w3, MemOperand(x16, -4, PreIndex));
3476 __ Stp(w2, w3, MemOperand(x17, 4, PreIndex));
3477 __ Mov(x20, x17);
3478 __ Stp(w0, w1, MemOperand(x17, -4, PreIndex));
3479 __ Ldp(x4, x5, MemOperand(x16, 8, PreIndex));
3480 __ Mov(x21, x16);
3481 __ Ldp(x6, x7, MemOperand(x16, -8, PreIndex));
3482 __ Stp(x7, x6, MemOperand(x18, 8, PreIndex));
3483 __ Mov(x22, x18);
3484 __ Stp(x5, x4, MemOperand(x18, -8, PreIndex));
3485 END();
3486
3487 if (CAN_RUN()) {
3488 RUN();
3489
3490 ASSERT_EQUAL_64(0x00112233, x0);
3491 ASSERT_EQUAL_64(0xccddeeff, x1);
3492 ASSERT_EQUAL_64(0x44556677, x2);
3493 ASSERT_EQUAL_64(0x00112233, x3);
3494 ASSERT_EQUAL_64(0xccddeeff00112233, dst[0]);
3495 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
3496 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
3497 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
3498 ASSERT_EQUAL_64(0x0011223344556677, x6);
3499 ASSERT_EQUAL_64(0x8899aabbccddeeff, x7);
3500 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
3501 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
3502 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
3503 ASSERT_EQUAL_64(src_base, x16);
3504 ASSERT_EQUAL_64(dst_base, x17);
3505 ASSERT_EQUAL_64(dst_base + 16, x18);
3506 ASSERT_EQUAL_64(src_base + 4, x19);
3507 ASSERT_EQUAL_64(dst_base + 4, x20);
3508 ASSERT_EQUAL_64(src_base + 8, x21);
3509 ASSERT_EQUAL_64(dst_base + 24, x22);
3510 }
3511 }
3512
3513
3514 TEST(ldp_stp_preindex_wide) {
3515 SETUP();
3516
3517 uint64_t src[3] = {0x0011223344556677,
3518 0x8899aabbccddeeff,
3519 0xffeeddccbbaa9988};
3520 uint64_t dst[5] = {0, 0, 0, 0, 0};
3521 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3522 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3523 // Move base too far from the array to force multiple instructions
3524 // to be emitted.
3525 const int64_t base_offset = 1024;
3526
3527 START();
3528 __ Mov(x24, src_base - base_offset);
3529 __ Mov(x25, dst_base + base_offset);
3530 __ Mov(x18, dst_base + base_offset + 16);
3531 __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
3532 __ Mov(x19, x24);
3533 __ Mov(x24, src_base - base_offset + 4);
3534 __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PreIndex));
3535 __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PreIndex));
3536 __ Mov(x20, x25);
3537 __ Mov(x25, dst_base + base_offset + 4);
3538 __ Mov(x24, src_base - base_offset);
3539 __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PreIndex));
3540 __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PreIndex));
3541 __ Mov(x21, x24);
3542 __ Mov(x24, src_base - base_offset + 8);
3543 __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
3544 __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
3545 __ Mov(x22, x18);
3546 __ Mov(x18, dst_base + base_offset + 16 + 8);
3547 __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
3548 END();
3549
3550 if (CAN_RUN()) {
3551 RUN();
3552
3553 ASSERT_EQUAL_64(0x00112233, x0);
3554 ASSERT_EQUAL_64(0xccddeeff, x1);
3555 ASSERT_EQUAL_64(0x44556677, x2);
3556 ASSERT_EQUAL_64(0x00112233, x3);
3557 ASSERT_EQUAL_64(0xccddeeff00112233, dst[0]);
3558 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
3559 ASSERT_EQUAL_64(0x8899aabbccddeeff, x4);
3560 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
3561 ASSERT_EQUAL_64(0x0011223344556677, x6);
3562 ASSERT_EQUAL_64(0x8899aabbccddeeff, x7);
3563 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
3564 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
3565 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
3566 ASSERT_EQUAL_64(src_base, x24);
3567 ASSERT_EQUAL_64(dst_base, x25);
3568 ASSERT_EQUAL_64(dst_base + 16, x18);
3569 ASSERT_EQUAL_64(src_base + 4, x19);
3570 ASSERT_EQUAL_64(dst_base + 4, x20);
3571 ASSERT_EQUAL_64(src_base + 8, x21);
3572 ASSERT_EQUAL_64(dst_base + 24, x22);
3573 }
3574 }
3575
3576
3577 TEST(ldp_stp_postindex) {
3578 SETUP();
3579
3580 uint64_t src[4] = {0x0011223344556677,
3581 0x8899aabbccddeeff,
3582 0xffeeddccbbaa9988,
3583 0x7766554433221100};
3584 uint64_t dst[5] = {0, 0, 0, 0, 0};
3585 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3586 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3587
3588 START();
3589 __ Mov(x16, src_base);
3590 __ Mov(x17, dst_base);
3591 __ Mov(x18, dst_base + 16);
3592 __ Ldp(w0, w1, MemOperand(x16, 4, PostIndex));
3593 __ Mov(x19, x16);
3594 __ Ldp(w2, w3, MemOperand(x16, -4, PostIndex));
3595 __ Stp(w2, w3, MemOperand(x17, 4, PostIndex));
3596 __ Mov(x20, x17);
3597 __ Stp(w0, w1, MemOperand(x17, -4, PostIndex));
3598 __ Ldp(x4, x5, MemOperand(x16, 8, PostIndex));
3599 __ Mov(x21, x16);
3600 __ Ldp(x6, x7, MemOperand(x16, -8, PostIndex));
3601 __ Stp(x7, x6, MemOperand(x18, 8, PostIndex));
3602 __ Mov(x22, x18);
3603 __ Stp(x5, x4, MemOperand(x18, -8, PostIndex));
3604 END();
3605
3606 if (CAN_RUN()) {
3607 RUN();
3608
3609 ASSERT_EQUAL_64(0x44556677, x0);
3610 ASSERT_EQUAL_64(0x00112233, x1);
3611 ASSERT_EQUAL_64(0x00112233, x2);
3612 ASSERT_EQUAL_64(0xccddeeff, x3);
3613 ASSERT_EQUAL_64(0x4455667700112233, dst[0]);
3614 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
3615 ASSERT_EQUAL_64(0x0011223344556677, x4);
3616 ASSERT_EQUAL_64(0x8899aabbccddeeff, x5);
3617 ASSERT_EQUAL_64(0x8899aabbccddeeff, x6);
3618 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x7);
3619 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
3620 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
3621 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
3622 ASSERT_EQUAL_64(src_base, x16);
3623 ASSERT_EQUAL_64(dst_base, x17);
3624 ASSERT_EQUAL_64(dst_base + 16, x18);
3625 ASSERT_EQUAL_64(src_base + 4, x19);
3626 ASSERT_EQUAL_64(dst_base + 4, x20);
3627 ASSERT_EQUAL_64(src_base + 8, x21);
3628 ASSERT_EQUAL_64(dst_base + 24, x22);
3629 }
3630 }
3631
3632
3633 TEST(ldp_stp_postindex_wide) {
3634 SETUP();
3635
3636 uint64_t src[4] = {0x0011223344556677,
3637 0x8899aabbccddeeff,
3638 0xffeeddccbbaa9988,
3639 0x7766554433221100};
3640 uint64_t dst[5] = {0, 0, 0, 0, 0};
3641 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3642 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3643 // Move base too far from the array to force multiple instructions
3644 // to be emitted.
3645 const int64_t base_offset = 1024;
3646
3647 START();
3648 __ Mov(x24, src_base);
3649 __ Mov(x25, dst_base);
3650 __ Mov(x18, dst_base + 16);
3651 __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
3652 __ Mov(x19, x24);
3653 __ Sub(x24, x24, base_offset);
3654 __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PostIndex));
3655 __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PostIndex));
3656 __ Mov(x20, x25);
3657 __ Sub(x24, x24, base_offset);
3658 __ Add(x25, x25, base_offset);
3659 __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PostIndex));
3660 __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PostIndex));
3661 __ Mov(x21, x24);
3662 __ Sub(x24, x24, base_offset);
3663 __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
3664 __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
3665 __ Mov(x22, x18);
3666 __ Add(x18, x18, base_offset);
3667 __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
3668 END();
3669
3670 if (CAN_RUN()) {
3671 RUN();
3672
3673 ASSERT_EQUAL_64(0x44556677, x0);
3674 ASSERT_EQUAL_64(0x00112233, x1);
3675 ASSERT_EQUAL_64(0x00112233, x2);
3676 ASSERT_EQUAL_64(0xccddeeff, x3);
3677 ASSERT_EQUAL_64(0x4455667700112233, dst[0]);
3678 ASSERT_EQUAL_64(0x0000000000112233, dst[1]);
3679 ASSERT_EQUAL_64(0x0011223344556677, x4);
3680 ASSERT_EQUAL_64(0x8899aabbccddeeff, x5);
3681 ASSERT_EQUAL_64(0x8899aabbccddeeff, x6);
3682 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x7);
3683 ASSERT_EQUAL_64(0xffeeddccbbaa9988, dst[2]);
3684 ASSERT_EQUAL_64(0x8899aabbccddeeff, dst[3]);
3685 ASSERT_EQUAL_64(0x0011223344556677, dst[4]);
3686 ASSERT_EQUAL_64(src_base + base_offset, x24);
3687 ASSERT_EQUAL_64(dst_base - base_offset, x25);
3688 ASSERT_EQUAL_64(dst_base - base_offset + 16, x18);
3689 ASSERT_EQUAL_64(src_base + base_offset + 4, x19);
3690 ASSERT_EQUAL_64(dst_base - base_offset + 4, x20);
3691 ASSERT_EQUAL_64(src_base + base_offset + 8, x21);
3692 ASSERT_EQUAL_64(dst_base - base_offset + 24, x22);
3693 }
3694 }
3695
3696
3697 TEST(ldp_sign_extend) {
3698 SETUP();
3699
3700 uint32_t src[2] = {0x80000000, 0x7fffffff};
3701 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3702
3703 START();
3704 __ Mov(x24, src_base);
3705 __ Ldpsw(x0, x1, MemOperand(x24));
3706 END();
3707
3708 if (CAN_RUN()) {
3709 RUN();
3710
3711 ASSERT_EQUAL_64(0xffffffff80000000, x0);
3712 ASSERT_EQUAL_64(0x000000007fffffff, x1);
3713 }
3714 }
3715
3716
3717 TEST(ldur_stur) {
3718 SETUP();
3719
3720 int64_t src[2] = {0x0123456789abcdef, 0x0123456789abcdef};
3721 int64_t dst[5] = {0, 0, 0, 0, 0};
3722 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3723 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3724
3725 START();
3726 __ Mov(x17, src_base);
3727 __ Mov(x18, dst_base);
3728 __ Mov(x19, src_base + 16);
3729 __ Mov(x20, dst_base + 32);
3730 __ Mov(x21, dst_base + 40);
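  // The offsets below are not multiples of the access size, so the
  // MacroAssembler selects the unscaled ldur/stur encodings (signed 9-bit
  // immediate), which is what this test exercises.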
3731 __ Ldr(w0, MemOperand(x17, 1));
3732 __ Str(w0, MemOperand(x18, 2));
3733 __ Ldr(x1, MemOperand(x17, 3));
3734 __ Str(x1, MemOperand(x18, 9));
3735 __ Ldr(w2, MemOperand(x19, -9));
3736 __ Str(w2, MemOperand(x20, -5));
3737 __ Ldrb(w3, MemOperand(x19, -1));
3738 __ Strb(w3, MemOperand(x21, -1));
3739 END();
3740
3741 if (CAN_RUN()) {
3742 RUN();
3743
3744 ASSERT_EQUAL_64(0x6789abcd, x0);
3745 ASSERT_EQUAL_64(0x00006789abcd0000, dst[0]);
3746 ASSERT_EQUAL_64(0xabcdef0123456789, x1);
3747 ASSERT_EQUAL_64(0xcdef012345678900, dst[1]);
3748 ASSERT_EQUAL_64(0x000000ab, dst[2]);
3749 ASSERT_EQUAL_64(0xabcdef01, x2);
3750 ASSERT_EQUAL_64(0x00abcdef01000000, dst[3]);
3751 ASSERT_EQUAL_64(0x00000001, x3);
3752 ASSERT_EQUAL_64(0x0100000000000000, dst[4]);
3753 ASSERT_EQUAL_64(src_base, x17);
3754 ASSERT_EQUAL_64(dst_base, x18);
3755 ASSERT_EQUAL_64(src_base + 16, x19);
3756 ASSERT_EQUAL_64(dst_base + 32, x20);
3757 }
3758 }
3759
3760
3761 TEST(ldur_stur_neon) {
3762 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
3763
3764 int64_t src[3] = {0x0123456789abcdef, 0x0123456789abcdef, 0x0123456789abcdef};
3765 int64_t dst[5] = {0, 0, 0, 0, 0};
3766 uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
3767 uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
3768
3769 START();
3770 __ Mov(x17, src_base);
3771 __ Mov(x18, dst_base);
3772 __ Ldr(b0, MemOperand(x17));
3773 __ Str(b0, MemOperand(x18));
3774 __ Ldr(h1, MemOperand(x17, 1));
3775 __ Str(h1, MemOperand(x18, 1));
3776 __ Ldr(s2, MemOperand(x17, 2));
3777 __ Str(s2, MemOperand(x18, 3));
3778 __ Ldr(d3, MemOperand(x17, 3));
3779 __ Str(d3, MemOperand(x18, 7));
3780 __ Ldr(q4, MemOperand(x17, 4));
3781 __ Str(q4, MemOperand(x18, 15));
3782 END();
3783
3784 if (CAN_RUN()) {
3785 RUN();
3786
3787 ASSERT_EQUAL_128(0, 0xef, q0);
3788 ASSERT_EQUAL_128(0, 0xabcd, q1);
3789 ASSERT_EQUAL_128(0, 0x456789ab, q2);
3790 ASSERT_EQUAL_128(0, 0xabcdef0123456789, q3);
3791 ASSERT_EQUAL_128(0x89abcdef01234567, 0x89abcdef01234567, q4);
3792 ASSERT_EQUAL_64(0x89456789ababcdef, dst[0]);
3793 ASSERT_EQUAL_64(0x67abcdef01234567, dst[1]);
3794 ASSERT_EQUAL_64(0x6789abcdef012345, dst[2]);
3795 ASSERT_EQUAL_64(0x0089abcdef012345, dst[3]);
3796 }
3797 }
3798
3799
3800 TEST(ldr_literal) {
3801 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
3802
3803 START();
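  // These Ldr overloads take an immediate value: the MacroAssembler stores the
  // value in the literal pool and emits a pc-relative literal load.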
3804 __ Ldr(x2, 0x1234567890abcdef);
3805 __ Ldr(w3, 0xfedcba09);
3806 __ Ldrsw(x4, 0x7fffffff);
3807 __ Ldrsw(x5, 0x80000000);
3808 __ Ldr(q11, 0x1234000056780000, 0xabcd0000ef000000);
3809 __ Ldr(d13, 1.234);
3810 __ Ldr(s25, 2.5);
3811 END();
3812
3813 if (CAN_RUN()) {
3814 RUN();
3815
3816 ASSERT_EQUAL_64(0x1234567890abcdef, x2);
3817 ASSERT_EQUAL_64(0xfedcba09, x3);
3818 ASSERT_EQUAL_64(0x7fffffff, x4);
3819 ASSERT_EQUAL_64(0xffffffff80000000, x5);
3820 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q11);
3821 ASSERT_EQUAL_FP64(1.234, d13);
3822 ASSERT_EQUAL_FP32(2.5, s25);
3823 }
3824 }
3825
3826
3827 TEST(ldr_literal_range) {
3828 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
3829
3830 START();
3831   // Make sure the pool is empty.
3832 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
3833 ASSERT_LITERAL_POOL_SIZE(0);
3834
3835 // Create some literal pool entries.
3836 __ Ldr(x0, 0x1234567890abcdef);
3837 __ Ldr(w1, 0xfedcba09);
3838 __ Ldrsw(x2, 0x7fffffff);
3839 __ Ldrsw(x3, 0x80000000);
3840 __ Ldr(q2, 0x1234000056780000, 0xabcd0000ef000000);
3841 __ Ldr(d0, 1.234);
3842 __ Ldr(s1, 2.5);
3843 ASSERT_LITERAL_POOL_SIZE(48);
3844
3845   // Emit more code than the maximum literal load range to ensure that the
3846   // pool is emitted.
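  // (A pc-relative literal load can only reach about +/-1 MB, so padding with
  // twice kMaxLoadLiteralRange of nops guarantees the pool is flushed first.)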
3847 const ptrdiff_t end = masm.GetCursorOffset() + 2 * kMaxLoadLiteralRange;
3848 while (masm.GetCursorOffset() < end) {
3849 __ Nop();
3850 }
3851
3852 // The pool should have been emitted.
3853 ASSERT_LITERAL_POOL_SIZE(0);
3854
3855 // These loads should be after the pool (and will require a new one).
3856 __ Ldr(x4, 0x34567890abcdef12);
3857 __ Ldr(w5, 0xdcba09fe);
3858 __ Ldrsw(x6, 0x7fffffff);
3859 __ Ldrsw(x7, 0x80000000);
3860 __ Ldr(q6, 0x1234000056780000, 0xabcd0000ef000000);
3861 __ Ldr(d4, 123.4);
3862 __ Ldr(s5, 250.0);
3863 ASSERT_LITERAL_POOL_SIZE(48);
3864 END();
3865
3866 if (CAN_RUN()) {
3867 RUN();
3868
3869 // Check that the literals loaded correctly.
3870 ASSERT_EQUAL_64(0x1234567890abcdef, x0);
3871 ASSERT_EQUAL_64(0xfedcba09, x1);
3872 ASSERT_EQUAL_64(0x7fffffff, x2);
3873 ASSERT_EQUAL_64(0xffffffff80000000, x3);
3874 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q2);
3875 ASSERT_EQUAL_FP64(1.234, d0);
3876 ASSERT_EQUAL_FP32(2.5, s1);
3877 ASSERT_EQUAL_64(0x34567890abcdef12, x4);
3878 ASSERT_EQUAL_64(0xdcba09fe, x5);
3879 ASSERT_EQUAL_64(0x7fffffff, x6);
3880 ASSERT_EQUAL_64(0xffffffff80000000, x7);
3881 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q6);
3882 ASSERT_EQUAL_FP64(123.4, d4);
3883 ASSERT_EQUAL_FP32(250.0, s5);
3884 }
3885 }
3886
3887
3888 template <typename T>
3889 void LoadIntValueHelper(T values[], int card) {
3890 SETUP();
3891
3892 const bool is_32bit = (sizeof(T) == 4);
3893 Register tgt1 = is_32bit ? Register(w1) : Register(x1);
3894 Register tgt2 = is_32bit ? Register(w2) : Register(x2);
3895
3896 START();
3897 __ Mov(x0, 0);
3898
3899   // If any of the values differ, x0 will be set to one.
3900 for (int i = 0; i < card; ++i) {
3901 __ Mov(tgt1, values[i]);
3902 __ Ldr(tgt2, values[i]);
3903 __ Cmp(tgt1, tgt2);
3904 __ Cset(x0, ne);
3905 }
3906 END();
3907
3908 if (CAN_RUN()) {
3909 RUN();
3910
3911 // If one of the values differs, the trace can be used to identify which
3912 // one.
3913 ASSERT_EQUAL_64(0, x0);
3914 }
3915 }
3916
3917
3918 TEST(ldr_literal_values_x) {
3919 static const uint64_t kValues[] = {0x8000000000000000,
3920 0x7fffffffffffffff,
3921 0x0000000000000000,
3922 0xffffffffffffffff,
3923 0x00ff00ff00ff00ff,
3924 0x1234567890abcdef};
3925
3926 LoadIntValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
3927 }
3928
3929
3930 TEST(ldr_literal_values_w) {
3931 static const uint32_t kValues[] = {0x80000000,
3932 0x7fffffff,
3933 0x00000000,
3934 0xffffffff,
3935 0x00ff00ff,
3936 0x12345678,
3937 0x90abcdef};
3938
3939 LoadIntValueHelper(kValues, sizeof(kValues) / sizeof(kValues[0]));
3940 }
3941
3942 TEST(ldr_literal_custom) {
3943 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
3944
3945 Label end_of_pool_before;
3946 Label end_of_pool_after;
3947
3948 const size_t kSizeOfPoolInBytes = 44;
3949
3950 Literal<uint64_t> before_x(0x1234567890abcdef);
3951 Literal<uint32_t> before_w(0xfedcba09);
3952 Literal<uint32_t> before_sx(0x80000000);
3953 Literal<uint64_t> before_q(0x1234000056780000, 0xabcd0000ef000000);
3954 Literal<double> before_d(1.234);
3955 Literal<float> before_s(2.5);
3956
3957 Literal<uint64_t> after_x(0x1234567890abcdef);
3958 Literal<uint32_t> after_w(0xfedcba09);
3959 Literal<uint32_t> after_sx(0x80000000);
3960 Literal<uint64_t> after_q(0x1234000056780000, 0xabcd0000ef000000);
3961 Literal<double> after_d(1.234);
3962 Literal<float> after_s(2.5);
3963
3964 START();
3965
3966 // Manually generate a pool.
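  // place() emits a previously constructed Literal at the current location;
  // branching over the pool keeps the data out of the execution path.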
3967 __ B(&end_of_pool_before);
3968 {
3969 ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
3970 __ place(&before_x);
3971 __ place(&before_w);
3972 __ place(&before_sx);
3973 __ place(&before_q);
3974 __ place(&before_d);
3975 __ place(&before_s);
3976 }
3977 __ Bind(&end_of_pool_before);
3978
3979 {
3980 ExactAssemblyScope scope(&masm, 12 * kInstructionSize);
3981 __ ldr(x2, &before_x);
3982 __ ldr(w3, &before_w);
3983 __ ldrsw(x5, &before_sx);
3984 __ ldr(q11, &before_q);
3985 __ ldr(d13, &before_d);
3986 __ ldr(s25, &before_s);
3987
3988 __ ldr(x6, &after_x);
3989 __ ldr(w7, &after_w);
3990 __ ldrsw(x8, &after_sx);
3991 __ ldr(q18, &after_q);
3992 __ ldr(d14, &after_d);
3993 __ ldr(s26, &after_s);
3994 }
3995
3996 // Manually generate a pool.
3997 __ B(&end_of_pool_after);
3998 {
3999 ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
4000 __ place(&after_x);
4001 __ place(&after_w);
4002 __ place(&after_sx);
4003 __ place(&after_q);
4004 __ place(&after_d);
4005 __ place(&after_s);
4006 }
4007 __ Bind(&end_of_pool_after);
4008
4009 END();
4010
4011 if (CAN_RUN()) {
4012 RUN();
4013
4014 ASSERT_EQUAL_64(0x1234567890abcdef, x2);
4015 ASSERT_EQUAL_64(0xfedcba09, x3);
4016 ASSERT_EQUAL_64(0xffffffff80000000, x5);
4017 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q11);
4018 ASSERT_EQUAL_FP64(1.234, d13);
4019 ASSERT_EQUAL_FP32(2.5, s25);
4020
4021 ASSERT_EQUAL_64(0x1234567890abcdef, x6);
4022 ASSERT_EQUAL_64(0xfedcba09, x7);
4023 ASSERT_EQUAL_64(0xffffffff80000000, x8);
4024 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q18);
4025 ASSERT_EQUAL_FP64(1.234, d14);
4026 ASSERT_EQUAL_FP32(2.5, s26);
4027 }
4028 }
4029
4030
4031 TEST(ldr_literal_custom_shared) {
4032 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
4033
4034 Label end_of_pool_before;
4035 Label end_of_pool_after;
4036
4037 const size_t kSizeOfPoolInBytes = 40;
4038
4039 Literal<uint64_t> before_x(0x1234567890abcdef);
4040 Literal<uint32_t> before_w(0xfedcba09);
4041 Literal<uint64_t> before_q(0x1234000056780000, 0xabcd0000ef000000);
4042 Literal<double> before_d(1.234);
4043 Literal<float> before_s(2.5);
4044
4045 Literal<uint64_t> after_x(0x1234567890abcdef);
4046 Literal<uint32_t> after_w(0xfedcba09);
4047 Literal<uint64_t> after_q(0x1234000056780000, 0xabcd0000ef000000);
4048 Literal<double> after_d(1.234);
4049 Literal<float> after_s(2.5);
4050
4051 START();
4052
4053 // Manually generate a pool.
4054 __ B(&end_of_pool_before);
4055 {
4056 ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
4057 __ place(&before_x);
4058 __ place(&before_w);
4059 __ place(&before_q);
4060 __ place(&before_d);
4061 __ place(&before_s);
4062 }
4063 __ Bind(&end_of_pool_before);
4064
4065 // Load the entries several times to test that literals can be shared.
4066 for (int i = 0; i < 50; i++) {
4067 ExactAssemblyScope scope(&masm, 12 * kInstructionSize);
4068 __ ldr(x2, &before_x);
4069 __ ldr(w3, &before_w);
4070 __ ldrsw(x5, &before_w); // Re-use before_w.
4071 __ ldr(q11, &before_q);
4072 __ ldr(d13, &before_d);
4073 __ ldr(s25, &before_s);
4074
4075 __ ldr(x6, &after_x);
4076 __ ldr(w7, &after_w);
4077 __ ldrsw(x8, &after_w); // Re-use after_w.
4078 __ ldr(q18, &after_q);
4079 __ ldr(d14, &after_d);
4080 __ ldr(s26, &after_s);
4081 }
4082
4083 // Manually generate a pool.
4084 __ B(&end_of_pool_after);
4085 {
4086 ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
4087 __ place(&after_x);
4088 __ place(&after_w);
4089 __ place(&after_q);
4090 __ place(&after_d);
4091 __ place(&after_s);
4092 }
4093 __ Bind(&end_of_pool_after);
4094
4095 END();
4096
4097 if (CAN_RUN()) {
4098 RUN();
4099
4100 ASSERT_EQUAL_64(0x1234567890abcdef, x2);
4101 ASSERT_EQUAL_64(0xfedcba09, x3);
4102 ASSERT_EQUAL_64(0xfffffffffedcba09, x5);
4103 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q11);
4104 ASSERT_EQUAL_FP64(1.234, d13);
4105 ASSERT_EQUAL_FP32(2.5, s25);
4106
4107 ASSERT_EQUAL_64(0x1234567890abcdef, x6);
4108 ASSERT_EQUAL_64(0xfedcba09, x7);
4109 ASSERT_EQUAL_64(0xfffffffffedcba09, x8);
4110 ASSERT_EQUAL_128(0x1234000056780000, 0xabcd0000ef000000, q18);
4111 ASSERT_EQUAL_FP64(1.234, d14);
4112 ASSERT_EQUAL_FP32(2.5, s26);
4113 }
4114 }
4115
4116 static const PrefetchOperation kPrfmOperations[] = {PLDL1KEEP,
4117 PLDL1STRM,
4118 PLDL2KEEP,
4119 PLDL2STRM,
4120 PLDL3KEEP,
4121 PLDL3STRM,
4122
4123 PLIL1KEEP,
4124 PLIL1STRM,
4125 PLIL2KEEP,
4126 PLIL2STRM,
4127 PLIL3KEEP,
4128 PLIL3STRM,
4129
4130 PSTL1KEEP,
4131 PSTL1STRM,
4132 PSTL2KEEP,
4133 PSTL2STRM,
4134 PSTL3KEEP,
4135 PSTL3STRM};
4136
4137 TEST(prfm_offset) {
4138 SETUP();
4139
4140 START();
4141 // The address used in prfm doesn't have to be valid.
4142 __ Mov(x0, 0x0123456789abcdef);
4143
4144 for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
4145 // Unallocated prefetch operations are ignored, so test all of them.
4146 // We have to use the Assembler directly for this.
4147 ExactAssemblyScope guard(&masm, 3 * kInstructionSize);
4148 __ prfm(op, MemOperand(x0));
4149 __ prfm(op, MemOperand(x0, 8));
4150 __ prfm(op, MemOperand(x0, 32760));
4151 }
4152
4153 for (PrefetchOperation op : kPrfmOperations) {
4154 // Also test named operations.
4155 __ Prfm(op, MemOperand(x0, 32768));
4156 __ Prfm(op, MemOperand(x0, 1));
4157 __ Prfm(op, MemOperand(x0, 9));
4158 __ Prfm(op, MemOperand(x0, 255));
4159 __ Prfm(op, MemOperand(x0, 257));
4160 __ Prfm(op, MemOperand(x0, -1));
4161 __ Prfm(op, MemOperand(x0, -9));
4162 __ Prfm(op, MemOperand(x0, -255));
4163 __ Prfm(op, MemOperand(x0, -257));
4164
4165 __ Prfm(op, MemOperand(x0, 0xfedcba9876543210));
4166 }
4167
4168 END();
4169 if (CAN_RUN()) {
4170 RUN();
4171 }
4172 }
4173
4174
4175 TEST(prfm_regoffset) {
4176 SETUP();
4177
4178 START();
4179 // The address used in prfm doesn't have to be valid.
4180 __ Mov(x0, 0x0123456789abcdef);
4181
4182 CPURegList inputs(CPURegister::kRegister, kXRegSize, 10, 18);
4183 __ Mov(x10, 0);
4184 __ Mov(x11, 1);
4185 __ Mov(x12, 8);
4186 __ Mov(x13, 255);
4187 __ Mov(x14, -0);
4188 __ Mov(x15, -1);
4189 __ Mov(x16, -8);
4190 __ Mov(x17, -255);
4191 __ Mov(x18, 0xfedcba9876543210);
4192
4193 for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
4194 // Unallocated prefetch operations are ignored, so test all of them.
4195 // We have to use the Assembler directly for this.
4196 ExactAssemblyScope guard(&masm, inputs.GetCount() * kInstructionSize);
4197 CPURegList loop = inputs;
4198 while (!loop.IsEmpty()) {
4199 __ prfm(op, MemOperand(x0, Register(loop.PopLowestIndex())));
4200 }
4201 }
4202
4203 for (PrefetchOperation op : kPrfmOperations) {
4204 // Also test named operations.
4205 CPURegList loop = inputs;
4206 while (!loop.IsEmpty()) {
4207 Register input(loop.PopLowestIndex());
4208 __ Prfm(op, MemOperand(x0, input, UXTW));
4209 __ Prfm(op, MemOperand(x0, input, UXTW, 3));
4210 __ Prfm(op, MemOperand(x0, input, LSL));
4211 __ Prfm(op, MemOperand(x0, input, LSL, 3));
4212 __ Prfm(op, MemOperand(x0, input, SXTW));
4213 __ Prfm(op, MemOperand(x0, input, SXTW, 3));
4214 __ Prfm(op, MemOperand(x0, input, SXTX));
4215 __ Prfm(op, MemOperand(x0, input, SXTX, 3));
4216 }
4217 }
4218
4219 END();
4220 if (CAN_RUN()) {
4221 RUN();
4222 }
4223 }
4224
4225
4226 TEST(prfm_literal_imm19) {
4227 SETUP();
4228 START();
4229
4230 for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
4231 // Unallocated prefetch operations are ignored, so test all of them.
4232 // We have to use the Assembler directly for this.
4233 ExactAssemblyScope guard(&masm, 3 * kInstructionSize);
4234 __ prfm(op, INT64_C(0));
4235 __ prfm(op, 1);
4236 __ prfm(op, -1);
4237 }
4238
4239 for (PrefetchOperation op : kPrfmOperations) {
4240 // Also test named operations.
4241 ExactAssemblyScope guard(&masm, 4 * kInstructionSize);
4242 // The address used in prfm doesn't have to be valid.
4243 __ prfm(op, 1000);
4244 __ prfm(op, -1000);
4245 __ prfm(op, 0x3ffff);
4246 __ prfm(op, -0x40000);
4247 }
4248
4249 END();
4250 if (CAN_RUN()) {
4251 RUN();
4252 }
4253 }
4254
4255
4256 TEST(prfm_literal) {
4257 SETUP();
4258
4259 Label end_of_pool_before;
4260 Label end_of_pool_after;
4261 Literal<uint64_t> before(0);
4262 Literal<uint64_t> after(0);
4263
4264 START();
4265
4266 // Manually generate a pool.
4267 __ B(&end_of_pool_before);
4268 {
4269 ExactAssemblyScope scope(&masm, before.GetSize());
4270 __ place(&before);
4271 }
4272 __ Bind(&end_of_pool_before);
4273
4274 for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
4275 // Unallocated prefetch operations are ignored, so test all of them.
4276 // We have to use the Assembler directly for this.
4277 ExactAssemblyScope guard(&masm, 2 * kInstructionSize);
4278 __ prfm(op, &before);
4279 __ prfm(op, &after);
4280 }
4281
4282 for (PrefetchOperation op : kPrfmOperations) {
4283 // Also test named operations.
4284 ExactAssemblyScope guard(&masm, 2 * kInstructionSize);
4285 __ prfm(op, &before);
4286 __ prfm(op, &after);
4287 }
4288
4289 // Manually generate a pool.
4290 __ B(&end_of_pool_after);
4291 {
4292 ExactAssemblyScope scope(&masm, after.GetSize());
4293 __ place(&after);
4294 }
4295 __ Bind(&end_of_pool_after);
4296
4297 END();
4298 if (CAN_RUN()) {
4299 RUN();
4300 }
4301 }
4302
4303
4304 TEST(prfm_wide) {
4305 SETUP();
4306
4307 START();
4308 // The address used in prfm doesn't have to be valid.
4309 __ Mov(x0, 0x0123456789abcdef);
4310
4311 for (PrefetchOperation op : kPrfmOperations) {
4312 __ Prfm(op, MemOperand(x0, 0x40000));
4313 __ Prfm(op, MemOperand(x0, -0x40001));
4314 __ Prfm(op, MemOperand(x0, UINT64_C(0x5555555555555555)));
4315 __ Prfm(op, MemOperand(x0, UINT64_C(0xfedcba9876543210)));
4316 }
4317
4318 END();
4319 if (CAN_RUN()) {
4320 RUN();
4321 }
4322 }
4323
4324
4325 TEST(load_prfm_literal) {
4326 // Test literals shared between both prfm and ldr.
4327 SETUP_WITH_FEATURES(CPUFeatures::kFP);
4328
4329 Label end_of_pool_before;
4330 Label end_of_pool_after;
4331
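  // Total size of the literals placed in each pool below:
  // 8 (x) + 4 (w) + 4 (sx) + 8 (d) + 4 (s) = 28 bytes.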
4332 const size_t kSizeOfPoolInBytes = 28;
4333
4334 Literal<uint64_t> before_x(0x1234567890abcdef);
4335 Literal<uint32_t> before_w(0xfedcba09);
4336 Literal<uint32_t> before_sx(0x80000000);
4337 Literal<double> before_d(1.234);
4338 Literal<float> before_s(2.5);
4339 Literal<uint64_t> after_x(0x1234567890abcdef);
4340 Literal<uint32_t> after_w(0xfedcba09);
4341 Literal<uint32_t> after_sx(0x80000000);
4342 Literal<double> after_d(1.234);
4343 Literal<float> after_s(2.5);
4344
4345 START();
4346
4347 // Manually generate a pool.
4348 __ B(&end_of_pool_before);
4349 {
4350 ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
4351 __ place(&before_x);
4352 __ place(&before_w);
4353 __ place(&before_sx);
4354 __ place(&before_d);
4355 __ place(&before_s);
4356 }
4357 __ Bind(&end_of_pool_before);
4358
4359 for (int op = 0; op < (1 << ImmPrefetchOperation_width); op++) {
4360 // Unallocated prefetch operations are ignored, so test all of them.
4361 ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
4362
4363 __ prfm(op, &before_x);
4364 __ prfm(op, &before_w);
4365 __ prfm(op, &before_sx);
4366 __ prfm(op, &before_d);
4367 __ prfm(op, &before_s);
4368
4369 __ prfm(op, &after_x);
4370 __ prfm(op, &after_w);
4371 __ prfm(op, &after_sx);
4372 __ prfm(op, &after_d);
4373 __ prfm(op, &after_s);
4374 }
4375
4376 for (PrefetchOperation op : kPrfmOperations) {
4377 // Also test named operations.
4378 ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
4379
4380 __ prfm(op, &before_x);
4381 __ prfm(op, &before_w);
4382 __ prfm(op, &before_sx);
4383 __ prfm(op, &before_d);
4384 __ prfm(op, &before_s);
4385
4386 __ prfm(op, &after_x);
4387 __ prfm(op, &after_w);
4388 __ prfm(op, &after_sx);
4389 __ prfm(op, &after_d);
4390 __ prfm(op, &after_s);
4391 }
4392
4393 {
4394 ExactAssemblyScope scope(&masm, 10 * kInstructionSize);
4395 __ ldr(x2, &before_x);
4396 __ ldr(w3, &before_w);
4397 __ ldrsw(x5, &before_sx);
4398 __ ldr(d13, &before_d);
4399 __ ldr(s25, &before_s);
4400
4401 __ ldr(x6, &after_x);
4402 __ ldr(w7, &after_w);
4403 __ ldrsw(x8, &after_sx);
4404 __ ldr(d14, &after_d);
4405 __ ldr(s26, &after_s);
4406 }
4407
4408 // Manually generate a pool.
4409 __ B(&end_of_pool_after);
4410 {
4411 ExactAssemblyScope scope(&masm, kSizeOfPoolInBytes);
4412 __ place(&after_x);
4413 __ place(&after_w);
4414 __ place(&after_sx);
4415 __ place(&after_d);
4416 __ place(&after_s);
4417 }
4418 __ Bind(&end_of_pool_after);
4419
4420 END();
4421
4422 if (CAN_RUN()) {
4423 RUN();
4424
4425 ASSERT_EQUAL_64(0x1234567890abcdef, x2);
4426 ASSERT_EQUAL_64(0xfedcba09, x3);
4427 ASSERT_EQUAL_64(0xffffffff80000000, x5);
4428 ASSERT_EQUAL_FP64(1.234, d13);
4429 ASSERT_EQUAL_FP32(2.5, s25);
4430
4431 ASSERT_EQUAL_64(0x1234567890abcdef, x6);
4432 ASSERT_EQUAL_64(0xfedcba09, x7);
4433 ASSERT_EQUAL_64(0xffffffff80000000, x8);
4434 ASSERT_EQUAL_FP64(1.234, d14);
4435 ASSERT_EQUAL_FP32(2.5, s26);
4436 }
4437 }
4438
4439
4440 TEST(add_sub_imm) {
4441 SETUP();
4442
4443 START();
4444 __ Mov(x0, 0x0);
4445 __ Mov(x1, 0x1111);
4446 __ Mov(x2, 0xffffffffffffffff);
4447 __ Mov(x3, 0x8000000000000000);
4448
4449 __ Add(x10, x0, Operand(0x123));
4450 __ Add(x11, x1, Operand(0x122000));
4451 __ Add(x12, x0, Operand(0xabc << 12));
4452 __ Add(x13, x2, Operand(1));
4453
4454 __ Add(w14, w0, Operand(0x123));
4455 __ Add(w15, w1, Operand(0x122000));
4456 __ Add(w16, w0, Operand(0xabc << 12));
4457 __ Add(w17, w2, Operand(1));
4458
4459 __ Sub(x20, x0, Operand(0x1));
4460 __ Sub(x21, x1, Operand(0x111));
4461 __ Sub(x22, x1, Operand(0x1 << 12));
4462 __ Sub(x23, x3, Operand(1));
4463
4464 __ Sub(w24, w0, Operand(0x1));
4465 __ Sub(w25, w1, Operand(0x111));
4466 __ Sub(w26, w1, Operand(0x1 << 12));
4467 __ Sub(w27, w3, Operand(1));
4468 END();
4469
4470 if (CAN_RUN()) {
4471 RUN();
4472
4473 ASSERT_EQUAL_64(0x123, x10);
4474 ASSERT_EQUAL_64(0x123111, x11);
4475 ASSERT_EQUAL_64(0xabc000, x12);
4476 ASSERT_EQUAL_64(0x0, x13);
4477
4478 ASSERT_EQUAL_32(0x123, w14);
4479 ASSERT_EQUAL_32(0x123111, w15);
4480 ASSERT_EQUAL_32(0xabc000, w16);
4481 ASSERT_EQUAL_32(0x0, w17);
4482
4483 ASSERT_EQUAL_64(0xffffffffffffffff, x20);
4484 ASSERT_EQUAL_64(0x1000, x21);
4485 ASSERT_EQUAL_64(0x111, x22);
4486 ASSERT_EQUAL_64(0x7fffffffffffffff, x23);
4487
4488 ASSERT_EQUAL_32(0xffffffff, w24);
4489 ASSERT_EQUAL_32(0x1000, w25);
4490 ASSERT_EQUAL_32(0x111, w26);
4491 ASSERT_EQUAL_32(0xffffffff, w27);
4492 }
4493 }
4494
4495
4496 TEST(add_sub_wide_imm) {
4497 SETUP();
4498
4499 START();
4500 __ Mov(x0, 0x0);
4501 __ Mov(x1, 0x1);
4502
4503 __ Add(x10, x0, Operand(0x1234567890abcdef));
4504 __ Add(x11, x1, Operand(0xffffffff));
4505
4506 __ Add(w12, w0, Operand(0x12345678));
4507 __ Add(w13, w1, Operand(0xffffffff));
4508
4509 __ Add(w18, w0, Operand(kWMinInt));
4510 __ Sub(w19, w0, Operand(kWMinInt));
4511
4512 __ Sub(x20, x0, Operand(0x1234567890abcdef));
4513 __ Sub(w21, w0, Operand(0x12345678));
4514
4515 END();
4516
4517 if (CAN_RUN()) {
4518 RUN();
4519
4520 ASSERT_EQUAL_64(0x1234567890abcdef, x10);
4521 ASSERT_EQUAL_64(0x100000000, x11);
4522
4523 ASSERT_EQUAL_32(0x12345678, w12);
4524 ASSERT_EQUAL_64(0x0, x13);
4525
4526 ASSERT_EQUAL_32(kWMinInt, w18);
4527 ASSERT_EQUAL_32(kWMinInt, w19);
4528
4529 ASSERT_EQUAL_64(-0x1234567890abcdef, x20);
4530 ASSERT_EQUAL_32(-0x12345678, w21);
4531 }
4532 }
4533
4534
4535 TEST(add_sub_shifted) {
4536 SETUP();
4537
4538 START();
4539 __ Mov(x0, 0);
4540 __ Mov(x1, 0x0123456789abcdef);
4541 __ Mov(x2, 0xfedcba9876543210);
4542 __ Mov(x3, 0xffffffffffffffff);
4543
4544 __ Add(x10, x1, Operand(x2));
4545 __ Add(x11, x0, Operand(x1, LSL, 8));
4546 __ Add(x12, x0, Operand(x1, LSR, 8));
4547 __ Add(x13, x0, Operand(x1, ASR, 8));
4548 __ Add(x14, x0, Operand(x2, ASR, 8));
4549 __ Add(w15, w0, Operand(w1, ASR, 8));
4550 __ Add(w18, w3, Operand(w1, ROR, 8));
4551 __ Add(x19, x3, Operand(x1, ROR, 8));
4552
4553 __ Sub(x20, x3, Operand(x2));
4554 __ Sub(x21, x3, Operand(x1, LSL, 8));
4555 __ Sub(x22, x3, Operand(x1, LSR, 8));
4556 __ Sub(x23, x3, Operand(x1, ASR, 8));
4557 __ Sub(x24, x3, Operand(x2, ASR, 8));
4558 __ Sub(w25, w3, Operand(w1, ASR, 8));
4559 __ Sub(w26, w3, Operand(w1, ROR, 8));
4560 __ Sub(x27, x3, Operand(x1, ROR, 8));
4561 END();
4562
4563 if (CAN_RUN()) {
4564 RUN();
4565
4566 ASSERT_EQUAL_64(0xffffffffffffffff, x10);
4567 ASSERT_EQUAL_64(0x23456789abcdef00, x11);
4568 ASSERT_EQUAL_64(0x000123456789abcd, x12);
4569 ASSERT_EQUAL_64(0x000123456789abcd, x13);
4570 ASSERT_EQUAL_64(0xfffedcba98765432, x14);
4571 ASSERT_EQUAL_64(0xff89abcd, x15);
4572 ASSERT_EQUAL_64(0xef89abcc, x18);
4573 ASSERT_EQUAL_64(0xef0123456789abcc, x19);
4574
4575 ASSERT_EQUAL_64(0x0123456789abcdef, x20);
4576 ASSERT_EQUAL_64(0xdcba9876543210ff, x21);
4577 ASSERT_EQUAL_64(0xfffedcba98765432, x22);
4578 ASSERT_EQUAL_64(0xfffedcba98765432, x23);
4579 ASSERT_EQUAL_64(0x000123456789abcd, x24);
4580 ASSERT_EQUAL_64(0x00765432, x25);
4581 ASSERT_EQUAL_64(0x10765432, x26);
4582 ASSERT_EQUAL_64(0x10fedcba98765432, x27);
4583 }
4584 }
4585
4586
4587 TEST(add_sub_extended) {
4588 SETUP();
4589
4590 START();
4591 __ Mov(x0, 0);
4592 __ Mov(x1, 0x0123456789abcdef);
4593 __ Mov(x2, 0xfedcba9876543210);
4594 __ Mov(w3, 0x80);
4595
4596 __ Add(x10, x0, Operand(x1, UXTB, 0));
4597 __ Add(x11, x0, Operand(x1, UXTB, 1));
4598 __ Add(x12, x0, Operand(x1, UXTH, 2));
4599 __ Add(x13, x0, Operand(x1, UXTW, 4));
4600
4601 __ Add(x14, x0, Operand(x1, SXTB, 0));
4602 __ Add(x15, x0, Operand(x1, SXTB, 1));
4603 __ Add(x16, x0, Operand(x1, SXTH, 2));
4604 __ Add(x17, x0, Operand(x1, SXTW, 3));
4605 __ Add(x18, x0, Operand(x2, SXTB, 0));
4606 __ Add(x19, x0, Operand(x2, SXTB, 1));
4607 __ Add(x20, x0, Operand(x2, SXTH, 2));
4608 __ Add(x21, x0, Operand(x2, SXTW, 3));
4609
4610 __ Add(x22, x1, Operand(x2, SXTB, 1));
4611 __ Sub(x23, x1, Operand(x2, SXTB, 1));
4612
4613 __ Add(w24, w1, Operand(w2, UXTB, 2));
4614 __ Add(w25, w0, Operand(w1, SXTB, 0));
4615 __ Add(w26, w0, Operand(w1, SXTB, 1));
4616 __ Add(w27, w2, Operand(w1, SXTW, 3));
4617
4618 __ Add(w28, w0, Operand(w1, SXTW, 3));
4619 __ Add(x29, x0, Operand(w1, SXTW, 3));
4620
4621 __ Sub(x30, x0, Operand(w3, SXTB, 1));
4622 END();
4623
4624 if (CAN_RUN()) {
4625 RUN();
4626
4627 ASSERT_EQUAL_64(0xef, x10);
4628 ASSERT_EQUAL_64(0x1de, x11);
4629 ASSERT_EQUAL_64(0x337bc, x12);
4630 ASSERT_EQUAL_64(0x89abcdef0, x13);
4631
4632 ASSERT_EQUAL_64(0xffffffffffffffef, x14);
4633 ASSERT_EQUAL_64(0xffffffffffffffde, x15);
4634 ASSERT_EQUAL_64(0xffffffffffff37bc, x16);
4635 ASSERT_EQUAL_64(0xfffffffc4d5e6f78, x17);
4636 ASSERT_EQUAL_64(0x10, x18);
4637 ASSERT_EQUAL_64(0x20, x19);
4638 ASSERT_EQUAL_64(0xc840, x20);
4639 ASSERT_EQUAL_64(0x3b2a19080, x21);
4640
4641 ASSERT_EQUAL_64(0x0123456789abce0f, x22);
4642 ASSERT_EQUAL_64(0x0123456789abcdcf, x23);
4643
4644 ASSERT_EQUAL_32(0x89abce2f, w24);
4645 ASSERT_EQUAL_32(0xffffffef, w25);
4646 ASSERT_EQUAL_32(0xffffffde, w26);
4647 ASSERT_EQUAL_32(0xc3b2a188, w27);
4648
4649 ASSERT_EQUAL_32(0x4d5e6f78, w28);
4650 ASSERT_EQUAL_64(0xfffffffc4d5e6f78, x29);
4651
4652 ASSERT_EQUAL_64(256, x30);
4653 }
4654 }
4655
4656
4657 TEST(add_sub_negative) {
4658 SETUP();
4659
4660 START();
4661 __ Mov(x0, 0);
4662 __ Mov(x1, 4687);
4663 __ Mov(x2, 0x1122334455667788);
4664 __ Mov(w3, 0x11223344);
4665 __ Mov(w4, 400000);
4666
4667 __ Add(x10, x0, -42);
4668 __ Add(x11, x1, -687);
4669 __ Add(x12, x2, -0x88);
4670
4671 __ Sub(x13, x0, -600);
4672 __ Sub(x14, x1, -313);
4673 __ Sub(x15, x2, -0x555);
4674
4675 __ Add(w19, w3, -0x344);
4676 __ Add(w20, w4, -2000);
4677
4678 __ Sub(w21, w3, -0xbc);
4679 __ Sub(w22, w4, -2000);
4680 END();
4681
4682 if (CAN_RUN()) {
4683 RUN();
4684
4685 ASSERT_EQUAL_64(-42, x10);
4686 ASSERT_EQUAL_64(4000, x11);
4687 ASSERT_EQUAL_64(0x1122334455667700, x12);
4688
4689 ASSERT_EQUAL_64(600, x13);
4690 ASSERT_EQUAL_64(5000, x14);
4691 ASSERT_EQUAL_64(0x1122334455667cdd, x15);
4692
4693 ASSERT_EQUAL_32(0x11223000, w19);
4694 ASSERT_EQUAL_32(398000, w20);
4695
4696 ASSERT_EQUAL_32(0x11223400, w21);
4697 ASSERT_EQUAL_32(402000, w22);
4698 }
4699 }
4700
4701
4702 TEST(add_sub_zero) {
4703 SETUP();
4704
4705 START();
4706 __ Mov(x0, 0);
4707 __ Mov(x1, 0);
4708 __ Mov(x2, 0);
4709
4710 Label blob1;
4711 __ Bind(&blob1);
4712 __ Add(x0, x0, 0);
4713 __ Sub(x1, x1, 0);
4714 __ Sub(x2, x2, xzr);
4715 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&blob1) == 0);
4716
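  // The 32-bit forms below cannot be elided: a W-register write also zeroes
  // the upper half of the X register, so even adding or subtracting zero can
  // change the register's 64-bit value.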
4717 Label blob2;
4718 __ Bind(&blob2);
4719 __ Add(w3, w3, 0);
4720 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&blob2) != 0);
4721
4722 Label blob3;
4723 __ Bind(&blob3);
4724 __ Sub(w3, w3, wzr);
4725 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&blob3) != 0);
4726
4727 END();
4728
4729 if (CAN_RUN()) {
4730 RUN();
4731
4732 ASSERT_EQUAL_64(0, x0);
4733 ASSERT_EQUAL_64(0, x1);
4734 ASSERT_EQUAL_64(0, x2);
4735 }
4736 }
4737
4738
4739 TEST(claim_drop_zero) {
4740 SETUP();
4741
4742 START();
4743
4744 Label start;
4745 __ Bind(&start);
4746 __ Claim(Operand(0));
4747 __ Drop(Operand(0));
4748 __ Claim(Operand(xzr));
4749 __ Drop(Operand(xzr));
4750 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&start) == 0);
4751
4752 END();
4753
4754 if (CAN_RUN()) {
4755 RUN();
4756 }
4757 }
4758
4759
4760 TEST(neg) {
4761 SETUP();
4762
4763 START();
4764 __ Mov(x0, 0xf123456789abcdef);
4765
4766 // Immediate.
4767 __ Neg(x1, 0x123);
4768 __ Neg(w2, 0x123);
4769
4770 // Shifted.
4771 __ Neg(x3, Operand(x0, LSL, 1));
4772 __ Neg(w4, Operand(w0, LSL, 2));
4773 __ Neg(x5, Operand(x0, LSR, 3));
4774 __ Neg(w6, Operand(w0, LSR, 4));
4775 __ Neg(x7, Operand(x0, ASR, 5));
4776 __ Neg(w8, Operand(w0, ASR, 6));
4777
4778 // Extended.
4779 __ Neg(w9, Operand(w0, UXTB));
4780 __ Neg(x10, Operand(x0, SXTB, 1));
4781 __ Neg(w11, Operand(w0, UXTH, 2));
4782 __ Neg(x12, Operand(x0, SXTH, 3));
4783 __ Neg(w13, Operand(w0, UXTW, 4));
4784 __ Neg(x14, Operand(x0, SXTW, 4));
4785 END();
4786
4787 if (CAN_RUN()) {
4788 RUN();
4789
4790 ASSERT_EQUAL_64(0xfffffffffffffedd, x1);
4791 ASSERT_EQUAL_64(0xfffffedd, x2);
4792 ASSERT_EQUAL_64(0x1db97530eca86422, x3);
4793 ASSERT_EQUAL_64(0xd950c844, x4);
4794 ASSERT_EQUAL_64(0xe1db97530eca8643, x5);
4795 ASSERT_EQUAL_64(0xf7654322, x6);
4796 ASSERT_EQUAL_64(0x0076e5d4c3b2a191, x7);
4797 ASSERT_EQUAL_64(0x01d950c9, x8);
4798 ASSERT_EQUAL_64(0xffffff11, x9);
4799 ASSERT_EQUAL_64(0x0000000000000022, x10);
4800 ASSERT_EQUAL_64(0xfffcc844, x11);
4801 ASSERT_EQUAL_64(0x0000000000019088, x12);
4802 ASSERT_EQUAL_64(0x65432110, x13);
4803 ASSERT_EQUAL_64(0x0000000765432110, x14);
4804 }
4805 }
4806
4807
4808 template <typename T, typename Op>
4809 static void AdcsSbcsHelper(
4810 Op op, T left, T right, int carry, T expected, StatusFlags expected_flags) {
4811 int reg_size = sizeof(T) * 8;
4812 Register left_reg(0, reg_size);
4813 Register right_reg(1, reg_size);
4814 Register result_reg(2, reg_size);
4815
4816 SETUP();
4817 START();
4818
4819 __ Mov(left_reg, left);
4820 __ Mov(right_reg, right);
4821 __ Mov(x10, (carry ? CFlag : NoFlag));
4822
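  // Install the requested carry-in by writing NZCV directly, then apply the
  // operation under test so its result and flags can be checked.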
4823 __ Msr(NZCV, x10);
4824 (masm.*op)(result_reg, left_reg, right_reg);
4825
4826 END();
4827 if (CAN_RUN()) {
4828 RUN();
4829
4830 ASSERT_EQUAL_64(left, left_reg.X());
4831 ASSERT_EQUAL_64(right, right_reg.X());
4832 ASSERT_EQUAL_64(expected, result_reg.X());
4833 ASSERT_EQUAL_NZCV(expected_flags);
4834 }
4835 }
4836
4837
4838 TEST(adcs_sbcs_x) {
4839 uint64_t inputs[] = {
4840 0x0000000000000000,
4841 0x0000000000000001,
4842 0x7ffffffffffffffe,
4843 0x7fffffffffffffff,
4844 0x8000000000000000,
4845 0x8000000000000001,
4846 0xfffffffffffffffe,
4847 0xffffffffffffffff,
4848 };
4849 static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
4850
4851 struct Expected {
4852 uint64_t carry0_result;
4853 StatusFlags carry0_flags;
4854 uint64_t carry1_result;
4855 StatusFlags carry1_flags;
4856 };
4857
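  // expected_adcs_x[left][right] (and expected_sbcs_x below) hold the result
  // and flags produced for each pair of inputs, for carry-in 0 and carry-in 1.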
4858 static const Expected expected_adcs_x[input_count][input_count] =
4859 {{{0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag},
4860 {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
4861 {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
4862 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4863 {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
4864 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4865 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4866 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}},
4867 {{0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
4868 {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
4869 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4870 {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
4871 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4872 {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
4873 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4874 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag}},
4875 {{0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
4876 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4877 {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
4878 {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
4879 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4880 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4881 {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
4882 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag}},
4883 {{0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4884 {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
4885 {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
4886 {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
4887 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4888 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4889 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4890 {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag}},
4891 {{0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
4892 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4893 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4894 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4895 {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
4896 {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
4897 {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
4898 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag}},
4899 {{0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4900 {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
4901 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4902 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4903 {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
4904 {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
4905 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4906 {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag}},
4907 {{0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4908 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4909 {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
4910 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4911 {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
4912 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4913 {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
4914 {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag}},
4915 {{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4916 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4917 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4918 {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
4919 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4920 {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
4921 {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
4922 {0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag}}};
4923
4924 static const Expected expected_sbcs_x[input_count][input_count] =
4925 {{{0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4926 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4927 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4928 {0x8000000000000000, NFlag, 0x8000000000000001, NFlag},
4929 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4930 {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag},
4931 {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag},
4932 {0x0000000000000000, ZFlag, 0x0000000000000001, NoFlag}},
4933 {{0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4934 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4935 {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
4936 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4937 {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
4938 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4939 {0x0000000000000002, NoFlag, 0x0000000000000003, NoFlag},
4940 {0x0000000000000001, NoFlag, 0x0000000000000002, NoFlag}},
4941 {{0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4942 {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
4943 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4944 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4945 {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
4946 {0xfffffffffffffffc, NVFlag, 0xfffffffffffffffd, NVFlag},
4947 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag},
4948 {0x7ffffffffffffffe, NoFlag, 0x7fffffffffffffff, NoFlag}},
4949 {{0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
4950 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4951 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4952 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4953 {0xfffffffffffffffe, NVFlag, 0xffffffffffffffff, NVFlag},
4954 {0xfffffffffffffffd, NVFlag, 0xfffffffffffffffe, NVFlag},
4955 {0x8000000000000000, NVFlag, 0x8000000000000001, NVFlag},
4956 {0x7fffffffffffffff, NoFlag, 0x8000000000000000, NVFlag}},
4957 {{0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4958 {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
4959 {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
4960 {0x0000000000000000, ZCVFlag, 0x0000000000000001, CVFlag},
4961 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4962 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag},
4963 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag},
4964 {0x8000000000000000, NFlag, 0x8000000000000001, NFlag}},
4965 {{0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
4966 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4967 {0x0000000000000002, CVFlag, 0x0000000000000003, CVFlag},
4968 {0x0000000000000001, CVFlag, 0x0000000000000002, CVFlag},
4969 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4970 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4971 {0x8000000000000002, NFlag, 0x8000000000000003, NFlag},
4972 {0x8000000000000001, NFlag, 0x8000000000000002, NFlag}},
4973 {{0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
4974 {0xfffffffffffffffc, NCFlag, 0xfffffffffffffffd, NCFlag},
4975 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4976 {0x7ffffffffffffffe, CVFlag, 0x7fffffffffffffff, CVFlag},
4977 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4978 {0x7ffffffffffffffc, CFlag, 0x7ffffffffffffffd, CFlag},
4979 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag},
4980 {0xfffffffffffffffe, NFlag, 0xffffffffffffffff, NFlag}},
4981 {{0xfffffffffffffffe, NCFlag, 0xffffffffffffffff, NCFlag},
4982 {0xfffffffffffffffd, NCFlag, 0xfffffffffffffffe, NCFlag},
4983 {0x8000000000000000, NCFlag, 0x8000000000000001, NCFlag},
4984 {0x7fffffffffffffff, CVFlag, 0x8000000000000000, NCFlag},
4985 {0x7ffffffffffffffe, CFlag, 0x7fffffffffffffff, CFlag},
4986 {0x7ffffffffffffffd, CFlag, 0x7ffffffffffffffe, CFlag},
4987 {0x0000000000000000, ZCFlag, 0x0000000000000001, CFlag},
4988 {0xffffffffffffffff, NFlag, 0x0000000000000000, ZCFlag}}};
4989
4990 for (size_t left = 0; left < input_count; left++) {
4991 for (size_t right = 0; right < input_count; right++) {
4992 const Expected& expected = expected_adcs_x[left][right];
4993 AdcsSbcsHelper(&MacroAssembler::Adcs,
4994 inputs[left],
4995 inputs[right],
4996 0,
4997 expected.carry0_result,
4998 expected.carry0_flags);
4999 AdcsSbcsHelper(&MacroAssembler::Adcs,
5000 inputs[left],
5001 inputs[right],
5002 1,
5003 expected.carry1_result,
5004 expected.carry1_flags);
5005 }
5006 }
5007
5008 for (size_t left = 0; left < input_count; left++) {
5009 for (size_t right = 0; right < input_count; right++) {
5010 const Expected& expected = expected_sbcs_x[left][right];
5011 AdcsSbcsHelper(&MacroAssembler::Sbcs,
5012 inputs[left],
5013 inputs[right],
5014 0,
5015 expected.carry0_result,
5016 expected.carry0_flags);
5017 AdcsSbcsHelper(&MacroAssembler::Sbcs,
5018 inputs[left],
5019 inputs[right],
5020 1,
5021 expected.carry1_result,
5022 expected.carry1_flags);
5023 }
5024 }
5025 }
5026
5027
5028 TEST(adcs_sbcs_w) {
5029 uint32_t inputs[] = {
5030 0x00000000,
5031 0x00000001,
5032 0x7ffffffe,
5033 0x7fffffff,
5034 0x80000000,
5035 0x80000001,
5036 0xfffffffe,
5037 0xffffffff,
5038 };
5039 static const size_t input_count = sizeof(inputs) / sizeof(inputs[0]);
5040
5041 struct Expected {
5042 uint32_t carry0_result;
5043 StatusFlags carry0_flags;
5044 uint32_t carry1_result;
5045 StatusFlags carry1_flags;
5046 };
5047
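  // As for the 64-bit test above: result and flags for each input pair, with
  // carry-in 0 and carry-in 1.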
5048 static const Expected expected_adcs_w[input_count][input_count] =
5049 {{{0x00000000, ZFlag, 0x00000001, NoFlag},
5050 {0x00000001, NoFlag, 0x00000002, NoFlag},
5051 {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
5052 {0x7fffffff, NoFlag, 0x80000000, NVFlag},
5053 {0x80000000, NFlag, 0x80000001, NFlag},
5054 {0x80000001, NFlag, 0x80000002, NFlag},
5055 {0xfffffffe, NFlag, 0xffffffff, NFlag},
5056 {0xffffffff, NFlag, 0x00000000, ZCFlag}},
5057 {{0x00000001, NoFlag, 0x00000002, NoFlag},
5058 {0x00000002, NoFlag, 0x00000003, NoFlag},
5059 {0x7fffffff, NoFlag, 0x80000000, NVFlag},
5060 {0x80000000, NVFlag, 0x80000001, NVFlag},
5061 {0x80000001, NFlag, 0x80000002, NFlag},
5062 {0x80000002, NFlag, 0x80000003, NFlag},
5063 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5064 {0x00000000, ZCFlag, 0x00000001, CFlag}},
5065 {{0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
5066 {0x7fffffff, NoFlag, 0x80000000, NVFlag},
5067 {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
5068 {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
5069 {0xfffffffe, NFlag, 0xffffffff, NFlag},
5070 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5071 {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
5072 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag}},
5073 {{0x7fffffff, NoFlag, 0x80000000, NVFlag},
5074 {0x80000000, NVFlag, 0x80000001, NVFlag},
5075 {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
5076 {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
5077 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5078 {0x00000000, ZCFlag, 0x00000001, CFlag},
5079 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5080 {0x7ffffffe, CFlag, 0x7fffffff, CFlag}},
5081 {{0x80000000, NFlag, 0x80000001, NFlag},
5082 {0x80000001, NFlag, 0x80000002, NFlag},
5083 {0xfffffffe, NFlag, 0xffffffff, NFlag},
5084 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5085 {0x00000000, ZCVFlag, 0x00000001, CVFlag},
5086 {0x00000001, CVFlag, 0x00000002, CVFlag},
5087 {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
5088 {0x7fffffff, CVFlag, 0x80000000, NCFlag}},
5089 {{0x80000001, NFlag, 0x80000002, NFlag},
5090 {0x80000002, NFlag, 0x80000003, NFlag},
5091 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5092 {0x00000000, ZCFlag, 0x00000001, CFlag},
5093 {0x00000001, CVFlag, 0x00000002, CVFlag},
5094 {0x00000002, CVFlag, 0x00000003, CVFlag},
5095 {0x7fffffff, CVFlag, 0x80000000, NCFlag},
5096 {0x80000000, NCFlag, 0x80000001, NCFlag}},
5097 {{0xfffffffe, NFlag, 0xffffffff, NFlag},
5098 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5099 {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
5100 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5101 {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
5102 {0x7fffffff, CVFlag, 0x80000000, NCFlag},
5103 {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
5104 {0xfffffffd, NCFlag, 0xfffffffe, NCFlag}},
5105 {{0xffffffff, NFlag, 0x00000000, ZCFlag},
5106 {0x00000000, ZCFlag, 0x00000001, CFlag},
5107 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5108 {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
5109 {0x7fffffff, CVFlag, 0x80000000, NCFlag},
5110 {0x80000000, NCFlag, 0x80000001, NCFlag},
5111 {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
5112 {0xfffffffe, NCFlag, 0xffffffff, NCFlag}}};
5113
5114 static const Expected expected_sbcs_w[input_count][input_count] =
5115 {{{0xffffffff, NFlag, 0x00000000, ZCFlag},
5116 {0xfffffffe, NFlag, 0xffffffff, NFlag},
5117 {0x80000001, NFlag, 0x80000002, NFlag},
5118 {0x80000000, NFlag, 0x80000001, NFlag},
5119 {0x7fffffff, NoFlag, 0x80000000, NVFlag},
5120 {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag},
5121 {0x00000001, NoFlag, 0x00000002, NoFlag},
5122 {0x00000000, ZFlag, 0x00000001, NoFlag}},
5123 {{0x00000000, ZCFlag, 0x00000001, CFlag},
5124 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5125 {0x80000002, NFlag, 0x80000003, NFlag},
5126 {0x80000001, NFlag, 0x80000002, NFlag},
5127 {0x80000000, NVFlag, 0x80000001, NVFlag},
5128 {0x7fffffff, NoFlag, 0x80000000, NVFlag},
5129 {0x00000002, NoFlag, 0x00000003, NoFlag},
5130 {0x00000001, NoFlag, 0x00000002, NoFlag}},
5131 {{0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5132 {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
5133 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5134 {0xfffffffe, NFlag, 0xffffffff, NFlag},
5135 {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
5136 {0xfffffffc, NVFlag, 0xfffffffd, NVFlag},
5137 {0x7fffffff, NoFlag, 0x80000000, NVFlag},
5138 {0x7ffffffe, NoFlag, 0x7fffffff, NoFlag}},
5139 {{0x7ffffffe, CFlag, 0x7fffffff, CFlag},
5140 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5141 {0x00000000, ZCFlag, 0x00000001, CFlag},
5142 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5143 {0xfffffffe, NVFlag, 0xffffffff, NVFlag},
5144 {0xfffffffd, NVFlag, 0xfffffffe, NVFlag},
5145 {0x80000000, NVFlag, 0x80000001, NVFlag},
5146 {0x7fffffff, NoFlag, 0x80000000, NVFlag}},
5147 {{0x7fffffff, CVFlag, 0x80000000, NCFlag},
5148 {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
5149 {0x00000001, CVFlag, 0x00000002, CVFlag},
5150 {0x00000000, ZCVFlag, 0x00000001, CVFlag},
5151 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5152 {0xfffffffe, NFlag, 0xffffffff, NFlag},
5153 {0x80000001, NFlag, 0x80000002, NFlag},
5154 {0x80000000, NFlag, 0x80000001, NFlag}},
5155 {{0x80000000, NCFlag, 0x80000001, NCFlag},
5156 {0x7fffffff, CVFlag, 0x80000000, NCFlag},
5157 {0x00000002, CVFlag, 0x00000003, CVFlag},
5158 {0x00000001, CVFlag, 0x00000002, CVFlag},
5159 {0x00000000, ZCFlag, 0x00000001, CFlag},
5160 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5161 {0x80000002, NFlag, 0x80000003, NFlag},
5162 {0x80000001, NFlag, 0x80000002, NFlag}},
5163 {{0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
5164 {0xfffffffc, NCFlag, 0xfffffffd, NCFlag},
5165 {0x7fffffff, CVFlag, 0x80000000, NCFlag},
5166 {0x7ffffffe, CVFlag, 0x7fffffff, CVFlag},
5167 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5168 {0x7ffffffc, CFlag, 0x7ffffffd, CFlag},
5169 {0xffffffff, NFlag, 0x00000000, ZCFlag},
5170 {0xfffffffe, NFlag, 0xffffffff, NFlag}},
5171 {{0xfffffffe, NCFlag, 0xffffffff, NCFlag},
5172 {0xfffffffd, NCFlag, 0xfffffffe, NCFlag},
5173 {0x80000000, NCFlag, 0x80000001, NCFlag},
5174 {0x7fffffff, CVFlag, 0x80000000, NCFlag},
5175 {0x7ffffffe, CFlag, 0x7fffffff, CFlag},
5176 {0x7ffffffd, CFlag, 0x7ffffffe, CFlag},
5177 {0x00000000, ZCFlag, 0x00000001, CFlag},
5178 {0xffffffff, NFlag, 0x00000000, ZCFlag}}};
5179
5180 for (size_t left = 0; left < input_count; left++) {
5181 for (size_t right = 0; right < input_count; right++) {
5182 const Expected& expected = expected_adcs_w[left][right];
5183 AdcsSbcsHelper(&MacroAssembler::Adcs,
5184 inputs[left],
5185 inputs[right],
5186 0,
5187 expected.carry0_result,
5188 expected.carry0_flags);
5189 AdcsSbcsHelper(&MacroAssembler::Adcs,
5190 inputs[left],
5191 inputs[right],
5192 1,
5193 expected.carry1_result,
5194 expected.carry1_flags);
5195 }
5196 }
5197
5198 for (size_t left = 0; left < input_count; left++) {
5199 for (size_t right = 0; right < input_count; right++) {
5200 const Expected& expected = expected_sbcs_w[left][right];
5201 AdcsSbcsHelper(&MacroAssembler::Sbcs,
5202 inputs[left],
5203 inputs[right],
5204 0,
5205 expected.carry0_result,
5206 expected.carry0_flags);
5207 AdcsSbcsHelper(&MacroAssembler::Sbcs,
5208 inputs[left],
5209 inputs[right],
5210 1,
5211 expected.carry1_result,
5212 expected.carry1_flags);
5213 }
5214 }
5215 }
5216
5217
5218 TEST(adc_sbc_shift) {
5219 SETUP();
5220
5221 START();
5222 __ Mov(x0, 0);
5223 __ Mov(x1, 1);
5224 __ Mov(x2, 0x0123456789abcdef);
5225 __ Mov(x3, 0xfedcba9876543210);
5226 __ Mov(x4, 0xffffffffffffffff);
5227
5228 // Clear the C flag.
5229 __ Adds(x0, x0, Operand(0));
5230
5231 __ Adc(x5, x2, Operand(x3));
5232 __ Adc(x6, x0, Operand(x1, LSL, 60));
5233 __ Sbc(x7, x4, Operand(x3, LSR, 4));
5234 __ Adc(x8, x2, Operand(x3, ASR, 4));
5235 __ Adc(x9, x2, Operand(x3, ROR, 8));
5236
5237 __ Adc(w10, w2, Operand(w3));
5238 __ Adc(w11, w0, Operand(w1, LSL, 30));
5239 __ Sbc(w12, w4, Operand(w3, LSR, 4));
5240 __ Adc(w13, w2, Operand(w3, ASR, 4));
5241 __ Adc(w14, w2, Operand(w3, ROR, 8));
5242
5243 // Set the C flag.
5244 __ Cmp(w0, Operand(w0));
5245
5246 __ Adc(x18, x2, Operand(x3));
5247 __ Adc(x19, x0, Operand(x1, LSL, 60));
5248 __ Sbc(x20, x4, Operand(x3, LSR, 4));
5249 __ Adc(x21, x2, Operand(x3, ASR, 4));
5250 __ Adc(x22, x2, Operand(x3, ROR, 8));
5251
5252 __ Adc(w23, w2, Operand(w3));
5253 __ Adc(w24, w0, Operand(w1, LSL, 30));
5254 __ Sbc(w25, w4, Operand(w3, LSR, 4));
5255 __ Adc(w26, w2, Operand(w3, ASR, 4));
5256 __ Adc(w27, w2, Operand(w3, ROR, 8));
5257 END();
5258
5259 if (CAN_RUN()) {
5260 RUN();
5261
5262 ASSERT_EQUAL_64(0xffffffffffffffff, x5);
5263 ASSERT_EQUAL_64(INT64_C(1) << 60, x6);
5264 ASSERT_EQUAL_64(0xf0123456789abcdd, x7);
5265 ASSERT_EQUAL_64(0x0111111111111110, x8);
5266 ASSERT_EQUAL_64(0x1222222222222221, x9);
5267
5268 ASSERT_EQUAL_32(0xffffffff, w10);
5269 ASSERT_EQUAL_32(INT32_C(1) << 30, w11);
5270 ASSERT_EQUAL_32(0xf89abcdd, w12);
5271 ASSERT_EQUAL_32(0x91111110, w13);
5272 ASSERT_EQUAL_32(0x9a222221, w14);
5273
5274 ASSERT_EQUAL_64(0xffffffffffffffff + 1, x18);
5275 ASSERT_EQUAL_64((INT64_C(1) << 60) + 1, x19);
5276 ASSERT_EQUAL_64(0xf0123456789abcdd + 1, x20);
5277 ASSERT_EQUAL_64(0x0111111111111110 + 1, x21);
5278 ASSERT_EQUAL_64(0x1222222222222221 + 1, x22);
5279
5280 ASSERT_EQUAL_32(0xffffffff + 1, w23);
5281 ASSERT_EQUAL_32((INT32_C(1) << 30) + 1, w24);
5282 ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
5283 ASSERT_EQUAL_32(0x91111110 + 1, w26);
5284 ASSERT_EQUAL_32(0x9a222221 + 1, w27);
5285 }
5286 }
5287
5288
5289 TEST(adc_sbc_extend) {
5290 SETUP();
5291
5292 START();
5293 // Clear the C flag.
5294 __ Adds(x0, x0, Operand(0));
5295
5296 __ Mov(x0, 0);
5297 __ Mov(x1, 1);
5298 __ Mov(x2, 0x0123456789abcdef);
5299
5300 __ Adc(x10, x1, Operand(w2, UXTB, 1));
5301 __ Adc(x11, x1, Operand(x2, SXTH, 2));
5302 __ Sbc(x12, x1, Operand(w2, UXTW, 4));
5303 __ Adc(x13, x1, Operand(x2, UXTX, 4));
5304
5305 __ Adc(w14, w1, Operand(w2, UXTB, 1));
5306 __ Adc(w15, w1, Operand(w2, SXTH, 2));
5307 __ Adc(w9, w1, Operand(w2, UXTW, 4));
5308
5309 // Set the C flag.
5310 __ Cmp(w0, Operand(w0));
5311
5312 __ Adc(x20, x1, Operand(w2, UXTB, 1));
5313 __ Adc(x21, x1, Operand(x2, SXTH, 2));
5314 __ Sbc(x22, x1, Operand(w2, UXTW, 4));
5315 __ Adc(x23, x1, Operand(x2, UXTX, 4));
5316
5317 __ Adc(w24, w1, Operand(w2, UXTB, 1));
5318 __ Adc(w25, w1, Operand(w2, SXTH, 2));
5319 __ Adc(w26, w1, Operand(w2, UXTW, 4));
5320 END();
5321
5322 if (CAN_RUN()) {
5323 RUN();
5324
5325 ASSERT_EQUAL_64(0x1df, x10);
5326 ASSERT_EQUAL_64(0xffffffffffff37bd, x11);
5327 ASSERT_EQUAL_64(0xfffffff765432110, x12);
5328 ASSERT_EQUAL_64(0x123456789abcdef1, x13);
5329
5330 ASSERT_EQUAL_32(0x1df, w14);
5331 ASSERT_EQUAL_32(0xffff37bd, w15);
5332 ASSERT_EQUAL_32(0x9abcdef1, w9);
5333
5334 ASSERT_EQUAL_64(0x1df + 1, x20);
5335 ASSERT_EQUAL_64(0xffffffffffff37bd + 1, x21);
5336 ASSERT_EQUAL_64(0xfffffff765432110 + 1, x22);
5337 ASSERT_EQUAL_64(0x123456789abcdef1 + 1, x23);
5338
5339 ASSERT_EQUAL_32(0x1df + 1, w24);
5340 ASSERT_EQUAL_32(0xffff37bd + 1, w25);
5341 ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
5342 }
5343
5344 // Check that adc correctly sets the condition flags.
5345 START();
5346 __ Mov(x0, 0xff);
5347 __ Mov(x1, 0xffffffffffffffff);
5348 // Clear the C flag.
5349 __ Adds(x0, x0, Operand(0));
5350 __ Adcs(x10, x0, Operand(x1, SXTX, 1));
5351 END();
5352
5353 if (CAN_RUN()) {
5354 RUN();
5355
5356 ASSERT_EQUAL_NZCV(CFlag);
5357 }
5358
5359 START();
5360 __ Mov(x0, 0x7fffffffffffffff);
5361 __ Mov(x1, 1);
5362 // Clear the C flag.
5363 __ Adds(x0, x0, Operand(0));
5364 __ Adcs(x10, x0, Operand(x1, UXTB, 2));
5365 END();
5366
5367 if (CAN_RUN()) {
5368 RUN();
5369
5370 ASSERT_EQUAL_NZCV(NVFlag);
5371 }
5372
5373 START();
5374 __ Mov(x0, 0x7fffffffffffffff);
5375 // Clear the C flag.
5376 __ Adds(x0, x0, Operand(0));
5377 __ Adcs(x10, x0, Operand(1));
5378 END();
5379
5380 if (CAN_RUN()) {
5381 RUN();
5382
5383 ASSERT_EQUAL_NZCV(NVFlag);
5384 }
5385 }
5386
5387
5388 TEST(adc_sbc_wide_imm) {
5389 SETUP();
5390
5391 START();
5392 __ Mov(x0, 0);
5393
5394 // Clear the C flag.
5395 __ Adds(x0, x0, Operand(0));
5396
5397 __ Adc(x7, x0, Operand(0x1234567890abcdef));
5398 __ Adc(w8, w0, Operand(0xffffffff));
5399 __ Sbc(x9, x0, Operand(0x1234567890abcdef));
5400 __ Sbc(w10, w0, Operand(0xffffffff));
5401 __ Ngc(x11, Operand(0xffffffff00000000));
5402 __ Ngc(w12, Operand(0xffff0000));
5403
5404 // Set the C flag.
5405 __ Cmp(w0, Operand(w0));
5406
5407 __ Adc(x18, x0, Operand(0x1234567890abcdef));
5408 __ Adc(w19, w0, Operand(0xffffffff));
5409 __ Sbc(x20, x0, Operand(0x1234567890abcdef));
5410 __ Sbc(w21, w0, Operand(0xffffffff));
5411 __ Ngc(x22, Operand(0xffffffff00000000));
5412 __ Ngc(w23, Operand(0xffff0000));
5413 END();
5414
5415 if (CAN_RUN()) {
5416 RUN();
5417
5418 ASSERT_EQUAL_64(0x1234567890abcdef, x7);
5419 ASSERT_EQUAL_64(0xffffffff, x8);
5420 ASSERT_EQUAL_64(0xedcba9876f543210, x9);
5421 ASSERT_EQUAL_64(0, x10);
5422 ASSERT_EQUAL_64(0xffffffff, x11);
5423 ASSERT_EQUAL_64(0xffff, x12);
5424
5425 ASSERT_EQUAL_64(0x1234567890abcdef + 1, x18);
5426 ASSERT_EQUAL_64(0, x19);
5427 ASSERT_EQUAL_64(0xedcba9876f543211, x20);
5428 ASSERT_EQUAL_64(1, x21);
5429 ASSERT_EQUAL_64(0x0000000100000000, x22);
5430 ASSERT_EQUAL_64(0x0000000000010000, x23);
5431 }
5432 }
5433
5434
5435 TEST(rmif) {
5436 SETUP_WITH_FEATURES(CPUFeatures::kFlagM);
5437
5438 START();
5439 __ Mov(x0, 0x0123456789abcdef);
5440
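  // Rmif rotates x0 right by the immediate and copies bits <3:0> of the result
  // into N, Z, C and V respectively, but only for the flags selected by the
  // mask; unselected flags are left unchanged.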
5441 // Set NZCV to 0b1011 (0xb)
5442 __ Rmif(x0, 0, NCVFlag);
5443 __ Mrs(x1, NZCV);
5444
5445 // Set NZCV to 0b0111 (0x7)
5446 __ Rmif(x0, 6, NZCVFlag);
5447 __ Mrs(x2, NZCV);
5448
5449 // Set Z to 0, NZCV = 0b0011 (0x3)
5450 __ Rmif(x0, 60, ZFlag);
5451 __ Mrs(x3, NZCV);
5452
5453 // Set N to 1 and C to 0, NZCV = 0b1001 (0x9)
5454 __ Rmif(x0, 62, NCFlag);
5455 __ Mrs(x4, NZCV);
5456
5457 // No change to NZCV
5458 __ Rmif(x0, 0, NoFlag);
5459 __ Mrs(x5, NZCV);
5460 END();
5461
5462 if (CAN_RUN()) {
5463 RUN();
5464 ASSERT_EQUAL_32(NCVFlag, w1);
5465 ASSERT_EQUAL_32(ZCVFlag, w2);
5466 ASSERT_EQUAL_32(CVFlag, w3);
5467 ASSERT_EQUAL_32(NVFlag, w4);
5468 ASSERT_EQUAL_32(NVFlag, w5);
5469 }
5470 }
5471
5472
5473 TEST(setf8_setf16) {
5474 SETUP_WITH_FEATURES(CPUFeatures::kFlagM);
5475
5476 START();
5477 __ Mov(x0, 0x0);
5478 __ Mov(x1, 0x1);
5479 __ Mov(x2, 0xff);
5480 __ Mov(x3, 0x100);
5481 __ Mov(x4, 0x101);
5482 __ Mov(x5, 0xffff);
5483 __ Mov(x6, 0x10000);
5484 __ Mov(x7, 0x10001);
5485 __ Mov(x8, 0xfffffffff);
5486
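  // Setf8 and Setf16 set N and Z from the low byte or halfword of the source,
  // and set V if that value is not correctly sign-extended into the next bit;
  // C is left unchanged.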
5487 __ Setf8(w0);
5488 __ Mrs(x9, NZCV);
5489 __ Setf8(w1);
5490 __ Mrs(x10, NZCV);
5491 __ Setf8(w2);
5492 __ Mrs(x11, NZCV);
5493 __ Setf8(w3);
5494 __ Mrs(x12, NZCV);
5495 __ Setf8(w4);
5496 __ Mrs(x13, NZCV);
5497 __ Setf8(w8);
5498 __ Mrs(x14, NZCV);
5499
5500 __ Setf16(w0);
5501 __ Mrs(x15, NZCV);
5502 __ Setf16(w1);
5503 __ Mrs(x16, NZCV);
5504 __ Setf16(w5);
5505 __ Mrs(x17, NZCV);
5506 __ Setf16(w6);
5507 __ Mrs(x18, NZCV);
5508 __ Setf16(w7);
5509 __ Mrs(x19, NZCV);
5510 __ Setf16(w8);
5511 __ Mrs(x20, NZCV);
5512 END();
5513
5514 if (CAN_RUN()) {
5515 RUN();
5516
5517 ASSERT_EQUAL_32(ZFlag, w9); // Zero
5518 ASSERT_EQUAL_32(NoFlag, w10); // Regular int8
5519 ASSERT_EQUAL_32(NVFlag, w11); // Negative but not sign-extended (overflow)
5520 ASSERT_EQUAL_32(ZVFlag, w12); // Overflow with zero remainder
5521 ASSERT_EQUAL_32(VFlag, w13); // Overflow with non-zero remainder
5522 ASSERT_EQUAL_32(NFlag, w14); // Negative and sign-extended
5523
5524 ASSERT_EQUAL_32(ZFlag, w15); // Zero
5525 ASSERT_EQUAL_32(NoFlag, w16); // Regular int16
5526 ASSERT_EQUAL_32(NVFlag, w17); // Negative but not sign-extended (overflow)
5527 ASSERT_EQUAL_32(ZVFlag, w18); // Overflow with zero remainder
5528 ASSERT_EQUAL_32(VFlag, w19); // Overflow with non-zero remainder
5529 ASSERT_EQUAL_32(NFlag, w20); // Negative and sign-extended
5530 }
5531 }
5532
5533
5534 TEST(flags) {
5535 SETUP();
5536
5537 START();
5538 __ Mov(x0, 0);
5539 __ Mov(x1, 0x1111111111111111);
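  // Neg computes 0 - operand. Ngc is the negate-with-carry variant, computing
  // NOT(operand) + C, i.e. -operand - 1 + C.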
5540 __ Neg(x10, Operand(x0));
5541 __ Neg(x11, Operand(x1));
5542 __ Neg(w12, Operand(w1));
5543 // Clear the C flag.
5544 __ Adds(x0, x0, Operand(0));
5545 __ Ngc(x13, Operand(x0));
5546 // Set the C flag.
5547 __ Cmp(x0, Operand(x0));
5548 __ Ngc(w14, Operand(w0));
5549 END();
5550
5551 if (CAN_RUN()) {
5552 RUN();
5553
5554 ASSERT_EQUAL_64(0, x10);
5555 ASSERT_EQUAL_64(-0x1111111111111111, x11);
5556 ASSERT_EQUAL_32(-0x11111111, w12);
5557 ASSERT_EQUAL_64(-1, x13);
5558 ASSERT_EQUAL_32(0, w14);
5559 }
5560
5561 START();
5562 __ Mov(x0, 0);
5563 __ Cmp(x0, Operand(x0));
5564 END();
5565
5566 if (CAN_RUN()) {
5567 RUN();
5568
5569 ASSERT_EQUAL_NZCV(ZCFlag);
5570 }
5571
5572 START();
5573 __ Mov(w0, 0);
5574 __ Cmp(w0, Operand(w0));
5575 END();
5576
5577 if (CAN_RUN()) {
5578 RUN();
5579
5580 ASSERT_EQUAL_NZCV(ZCFlag);
5581 }
5582
5583 START();
5584 __ Mov(x0, 0);
5585 __ Mov(x1, 0x1111111111111111);
5586 __ Cmp(x0, Operand(x1));
5587 END();
5588
5589 if (CAN_RUN()) {
5590 RUN();
5591
5592 ASSERT_EQUAL_NZCV(NFlag);
5593 }
5594
5595 START();
5596 __ Mov(w0, 0);
5597 __ Mov(w1, 0x11111111);
5598 __ Cmp(w0, Operand(w1));
5599 END();
5600
5601 if (CAN_RUN()) {
5602 RUN();
5603
5604 ASSERT_EQUAL_NZCV(NFlag);
5605 }
5606
5607 START();
5608 __ Mov(x1, 0x1111111111111111);
5609 __ Cmp(x1, Operand(0));
5610 END();
5611
5612 if (CAN_RUN()) {
5613 RUN();
5614
5615 ASSERT_EQUAL_NZCV(CFlag);
5616 }
5617
5618 START();
5619 __ Mov(w1, 0x11111111);
5620 __ Cmp(w1, Operand(0));
5621 END();
5622
5623 if (CAN_RUN()) {
5624 RUN();
5625
5626 ASSERT_EQUAL_NZCV(CFlag);
5627 }
5628
5629 START();
5630 __ Mov(x0, 1);
5631 __ Mov(x1, 0x7fffffffffffffff);
5632 __ Cmn(x1, Operand(x0));
5633 END();
5634
5635 if (CAN_RUN()) {
5636 RUN();
5637
5638 ASSERT_EQUAL_NZCV(NVFlag);
5639 }
5640
5641 START();
5642 __ Mov(w0, 1);
5643 __ Mov(w1, 0x7fffffff);
5644 __ Cmn(w1, Operand(w0));
5645 END();
5646
5647 if (CAN_RUN()) {
5648 RUN();
5649
5650 ASSERT_EQUAL_NZCV(NVFlag);
5651 }
5652
5653 START();
5654 __ Mov(x0, 1);
5655 __ Mov(x1, 0xffffffffffffffff);
5656 __ Cmn(x1, Operand(x0));
5657 END();
5658
5659 if (CAN_RUN()) {
5660 RUN();
5661
5662 ASSERT_EQUAL_NZCV(ZCFlag);
5663 }
5664
5665 START();
5666 __ Mov(w0, 1);
5667 __ Mov(w1, 0xffffffff);
5668 __ Cmn(w1, Operand(w0));
5669 END();
5670
5671 if (CAN_RUN()) {
5672 RUN();
5673
5674 ASSERT_EQUAL_NZCV(ZCFlag);
5675 }
5676
5677 START();
5678 __ Mov(w0, 0);
5679 __ Mov(w1, 1);
5680 // Clear the C flag.
5681 __ Adds(w0, w0, Operand(0));
5682 __ Ngcs(w0, Operand(w1));
5683 END();
5684
5685 if (CAN_RUN()) {
5686 RUN();
5687
5688 ASSERT_EQUAL_NZCV(NFlag);
5689 }
5690
5691 START();
5692 __ Mov(w0, 0);
5693 __ Mov(w1, 0);
5694 // Set the C flag.
5695 __ Cmp(w0, Operand(w0));
5696 __ Ngcs(w0, Operand(w1));
5697 END();
5698
5699 if (CAN_RUN()) {
5700 RUN();
5701
5702 ASSERT_EQUAL_NZCV(ZCFlag);
5703 }
5704 }
5705
5706
5707 TEST(cmp_shift) {
5708 SETUP();
5709
5710 START();
5711 __ Mov(x18, 0xf0000000);
5712 __ Mov(x19, 0xf000000010000000);
5713 __ Mov(x20, 0xf0000000f0000000);
5714 __ Mov(x21, 0x7800000078000000);
5715 __ Mov(x22, 0x3c0000003c000000);
5716 __ Mov(x23, 0x8000000780000000);
5717 __ Mov(x24, 0x0000000f00000000);
5718 __ Mov(x25, 0x00000003c0000000);
5719 __ Mov(x26, 0x8000000780000000);
5720 __ Mov(x27, 0xc0000003);
5721
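  // Each shifted operand below is constructed to equal the register it is
  // compared against, so every comparison should set Z and C.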
5722 __ Cmp(w20, Operand(w21, LSL, 1));
5723 __ Mrs(x0, NZCV);
5724
5725 __ Cmp(x20, Operand(x22, LSL, 2));
5726 __ Mrs(x1, NZCV);
5727
5728 __ Cmp(w19, Operand(w23, LSR, 3));
5729 __ Mrs(x2, NZCV);
5730
5731 __ Cmp(x18, Operand(x24, LSR, 4));
5732 __ Mrs(x3, NZCV);
5733
5734 __ Cmp(w20, Operand(w25, ASR, 2));
5735 __ Mrs(x4, NZCV);
5736
5737 __ Cmp(x20, Operand(x26, ASR, 3));
5738 __ Mrs(x5, NZCV);
5739
5740 __ Cmp(w27, Operand(w22, ROR, 28));
5741 __ Mrs(x6, NZCV);
5742
5743 __ Cmp(x20, Operand(x21, ROR, 31));
5744 __ Mrs(x7, NZCV);
5745 END();
5746
5747 if (CAN_RUN()) {
5748 RUN();
5749
5750 ASSERT_EQUAL_32(ZCFlag, w0);
5751 ASSERT_EQUAL_32(ZCFlag, w1);
5752 ASSERT_EQUAL_32(ZCFlag, w2);
5753 ASSERT_EQUAL_32(ZCFlag, w3);
5754 ASSERT_EQUAL_32(ZCFlag, w4);
5755 ASSERT_EQUAL_32(ZCFlag, w5);
5756 ASSERT_EQUAL_32(ZCFlag, w6);
5757 ASSERT_EQUAL_32(ZCFlag, w7);
5758 }
5759 }
5760
5761
5762 TEST(cmp_extend) {
5763 SETUP();
5764
5765 START();
5766 __ Mov(w20, 0x2);
5767 __ Mov(w21, 0x1);
5768 __ Mov(x22, 0xffffffffffffffff);
5769 __ Mov(x23, 0xff);
5770 __ Mov(x24, 0xfffffffffffffffe);
5771 __ Mov(x25, 0xffff);
5772 __ Mov(x26, 0xffffffff);
5773
5774 __ Cmp(w20, Operand(w21, LSL, 1));
5775 __ Mrs(x0, NZCV);
5776
5777 __ Cmp(x22, Operand(x23, SXTB, 0));
5778 __ Mrs(x1, NZCV);
5779
5780 __ Cmp(x24, Operand(x23, SXTB, 1));
5781 __ Mrs(x2, NZCV);
5782
5783 __ Cmp(x24, Operand(x23, UXTB, 1));
5784 __ Mrs(x3, NZCV);
5785
5786 __ Cmp(w22, Operand(w25, UXTH));
5787 __ Mrs(x4, NZCV);
5788
5789 __ Cmp(x22, Operand(x25, SXTH));
5790 __ Mrs(x5, NZCV);
5791
5792 __ Cmp(x22, Operand(x26, UXTW));
5793 __ Mrs(x6, NZCV);
5794
5795 __ Cmp(x24, Operand(x26, SXTW, 1));
5796 __ Mrs(x7, NZCV);
5797 END();
5798
5799 if (CAN_RUN()) {
5800 RUN();
5801
5802 ASSERT_EQUAL_32(ZCFlag, w0);
5803 ASSERT_EQUAL_32(ZCFlag, w1);
5804 ASSERT_EQUAL_32(ZCFlag, w2);
5805 ASSERT_EQUAL_32(NCFlag, w3);
5806 ASSERT_EQUAL_32(NCFlag, w4);
5807 ASSERT_EQUAL_32(ZCFlag, w5);
5808 ASSERT_EQUAL_32(NCFlag, w6);
5809 ASSERT_EQUAL_32(ZCFlag, w7);
5810 }
5811 }
5812
5813
5814 TEST(ccmp) {
5815 SETUP();
5816
5817 START();
5818 __ Mov(w16, 0);
5819 __ Mov(w17, 1);
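  // Ccmp/Ccmn perform the comparison and set NZCV from it when the condition
  // holds; otherwise NZCV is set directly to the supplied immediate value.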
5820 __ Cmp(w16, w16);
5821 __ Ccmp(w16, w17, NCFlag, eq);
5822 __ Mrs(x0, NZCV);
5823
5824 __ Cmp(w16, w16);
5825 __ Ccmp(w16, w17, NCFlag, ne);
5826 __ Mrs(x1, NZCV);
5827
5828 __ Cmp(x16, x16);
5829 __ Ccmn(x16, 2, NZCVFlag, eq);
5830 __ Mrs(x2, NZCV);
5831
5832 __ Cmp(x16, x16);
5833 __ Ccmn(x16, 2, NZCVFlag, ne);
5834 __ Mrs(x3, NZCV);
5835
5836 // The MacroAssembler does not allow al as a condition.
5837 {
5838 ExactAssemblyScope scope(&masm, kInstructionSize);
5839 __ ccmp(x16, x16, NZCVFlag, al);
5840 }
5841 __ Mrs(x4, NZCV);
5842
5843 // The MacroAssembler does not allow nv as a condition.
5844 {
5845 ExactAssemblyScope scope(&masm, kInstructionSize);
5846 __ ccmp(x16, x16, NZCVFlag, nv);
5847 }
5848 __ Mrs(x5, NZCV);
5849
5850 END();
5851
5852 if (CAN_RUN()) {
5853 RUN();
5854
5855 ASSERT_EQUAL_32(NFlag, w0);
5856 ASSERT_EQUAL_32(NCFlag, w1);
5857 ASSERT_EQUAL_32(NoFlag, w2);
5858 ASSERT_EQUAL_32(NZCVFlag, w3);
5859 ASSERT_EQUAL_32(ZCFlag, w4);
5860 ASSERT_EQUAL_32(ZCFlag, w5);
5861 }
5862 }
5863
5864
5865 TEST(ccmp_wide_imm) {
5866 SETUP();
5867
5868 START();
5869 __ Mov(w20, 0);
5870
5871 __ Cmp(w20, Operand(w20));
5872 __ Ccmp(w20, Operand(0x12345678), NZCVFlag, eq);
5873 __ Mrs(x0, NZCV);
5874
5875 __ Cmp(w20, Operand(w20));
5876 __ Ccmp(x20, Operand(0xffffffffffffffff), NZCVFlag, eq);
5877 __ Mrs(x1, NZCV);
5878 END();
5879
5880 if (CAN_RUN()) {
5881 RUN();
5882
5883 ASSERT_EQUAL_32(NFlag, w0);
5884 ASSERT_EQUAL_32(NoFlag, w1);
5885 }
5886 }
5887
5888
5889 TEST(ccmp_shift_extend) {
5890 SETUP();
5891
5892 START();
5893 __ Mov(w20, 0x2);
5894 __ Mov(w21, 0x1);
5895 __ Mov(x22, 0xffffffffffffffff);
5896 __ Mov(x23, 0xff);
5897 __ Mov(x24, 0xfffffffffffffffe);
5898
5899 __ Cmp(w20, Operand(w20));
5900 __ Ccmp(w20, Operand(w21, LSL, 1), NZCVFlag, eq);
5901 __ Mrs(x0, NZCV);
5902
5903 __ Cmp(w20, Operand(w20));
5904 __ Ccmp(x22, Operand(x23, SXTB, 0), NZCVFlag, eq);
5905 __ Mrs(x1, NZCV);
5906
5907 __ Cmp(w20, Operand(w20));
5908 __ Ccmp(x24, Operand(x23, SXTB, 1), NZCVFlag, eq);
5909 __ Mrs(x2, NZCV);
5910
5911 __ Cmp(w20, Operand(w20));
5912 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, eq);
5913 __ Mrs(x3, NZCV);
5914
5915 __ Cmp(w20, Operand(w20));
5916 __ Ccmp(x24, Operand(x23, UXTB, 1), NZCVFlag, ne);
5917 __ Mrs(x4, NZCV);
5918 END();
5919
5920 if (CAN_RUN()) {
5921 RUN();
5922
5923 ASSERT_EQUAL_32(ZCFlag, w0);
5924 ASSERT_EQUAL_32(ZCFlag, w1);
5925 ASSERT_EQUAL_32(ZCFlag, w2);
5926 ASSERT_EQUAL_32(NCFlag, w3);
5927 ASSERT_EQUAL_32(NZCVFlag, w4);
5928 }
5929 }
5930
5931
5932 TEST(csel_reg) {
5933 SETUP();
5934
5935 START();
5936 __ Mov(x16, 0);
5937 __ Mov(x24, 0x0000000f0000000f);
5938 __ Mov(x25, 0x0000001f0000001f);
5939
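  // Csel selects the first register if the condition holds and the second
  // otherwise; Csinc, Csinv and Csneg instead return the second register
  // incremented, inverted or negated when the condition fails.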
5940 __ Cmp(w16, Operand(0));
5941 __ Csel(w0, w24, w25, eq);
5942 __ Csel(w1, w24, w25, ne);
5943 __ Csinc(w2, w24, w25, mi);
5944 __ Csinc(w3, w24, w25, pl);
5945
5946 // The MacroAssembler does not allow al or nv as a condition.
5947 {
5948 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
5949 __ csel(w13, w24, w25, al);
5950 __ csel(x14, x24, x25, nv);
5951 }
5952
5953 __ Cmp(x16, Operand(1));
5954 __ Csinv(x4, x24, x25, gt);
5955 __ Csinv(x5, x24, x25, le);
5956 __ Csneg(x6, x24, x25, hs);
5957 __ Csneg(x7, x24, x25, lo);
5958
5959 __ Cset(w8, ne);
5960 __ Csetm(w9, ne);
5961 __ Cinc(x10, x25, ne);
5962 __ Cinv(x11, x24, ne);
5963 __ Cneg(x12, x24, ne);
5964
5965 // The MacroAssembler does not allow al or nv as a condition.
5966 {
5967 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
5968 __ csel(w15, w24, w25, al);
5969 __ csel(x17, x24, x25, nv);
5970 }
5971
5972 END();
5973
5974 if (CAN_RUN()) {
5975 RUN();
5976
5977 ASSERT_EQUAL_64(0x0000000f, x0);
5978 ASSERT_EQUAL_64(0x0000001f, x1);
5979 ASSERT_EQUAL_64(0x00000020, x2);
5980 ASSERT_EQUAL_64(0x0000000f, x3);
5981 ASSERT_EQUAL_64(0xffffffe0ffffffe0, x4);
5982 ASSERT_EQUAL_64(0x0000000f0000000f, x5);
5983 ASSERT_EQUAL_64(0xffffffe0ffffffe1, x6);
5984 ASSERT_EQUAL_64(0x0000000f0000000f, x7);
5985 ASSERT_EQUAL_64(0x00000001, x8);
5986 ASSERT_EQUAL_64(0xffffffff, x9);
5987 ASSERT_EQUAL_64(0x0000001f00000020, x10);
5988 ASSERT_EQUAL_64(0xfffffff0fffffff0, x11);
5989 ASSERT_EQUAL_64(0xfffffff0fffffff1, x12);
5990 ASSERT_EQUAL_64(0x0000000f, x13);
5991 ASSERT_EQUAL_64(0x0000000f0000000f, x14);
5992 ASSERT_EQUAL_64(0x0000000f, x15);
5993 ASSERT_EQUAL_64(0x0000000f0000000f, x17);
5994 }
5995 }
5996
5997 TEST(csel_zero) {
5998 SETUP();
5999
6000 START();
6001
6002 __ Mov(x15, 0x0);
6003 __ Mov(x16, 0x0000001f0000002f);
6004
6005 // Check results when zero registers are used as inputs
6006 // for Csinc, Csinv and Csneg for both true and false conditions.
6007 __ Cmp(x15, 0);
6008 __ Csinc(x0, x16, xzr, eq);
6009 __ Csinc(x1, xzr, x16, eq);
6010 __ Cmp(x15, 1);
6011 __ Csinc(w2, w16, wzr, eq);
6012 __ Csinc(w3, wzr, w16, eq);
6013
6014 __ Csinc(x4, xzr, xzr, eq);
6015
6016 __ Cmp(x15, 0);
6017 __ Csinv(x5, x16, xzr, eq);
6018 __ Csinv(x6, xzr, x16, eq);
6019 __ Cmp(x15, 1);
6020 __ Csinv(w7, w16, wzr, eq);
6021 __ Csinv(w8, wzr, w16, eq);
6022
6023 __ Csinv(x9, xzr, xzr, eq);
6024
6025 __ Cmp(x15, 0);
6026 __ Csneg(x10, x16, xzr, eq);
6027 __ Csneg(x11, xzr, x16, eq);
6028 __ Cmp(x15, 1);
6029 __ Csneg(w12, w16, wzr, eq);
6030 __ Csneg(w13, wzr, w16, eq);
6031
6032 __ Csneg(x14, xzr, xzr, eq);
6033
6034 END();
6035
6036 if (CAN_RUN()) {
6037 RUN();
6038
6039 ASSERT_EQUAL_64(0x0000001f0000002f, x0);
6040 ASSERT_EQUAL_64(0x0, x1);
6041 ASSERT_EQUAL_32(0x1, w2);
6042 ASSERT_EQUAL_32(0x30, w3);
6043 ASSERT_EQUAL_64(0x1, x4);
6044 ASSERT_EQUAL_64(0x0000001f0000002f, x5);
6045 ASSERT_EQUAL_64(0x0, x6);
6046 ASSERT_EQUAL_32(0xffffffff, w7);
6047 ASSERT_EQUAL_32(0xffffffd0, w8);
6048 ASSERT_EQUAL_64(0xffffffffffffffff, x9);
6049 ASSERT_EQUAL_64(0x0000001f0000002f, x10);
6050 ASSERT_EQUAL_64(0x0, x11);
6051 ASSERT_EQUAL_32(0x0, w12);
6052 ASSERT_EQUAL_32(0xffffffd1, w13);
6053 ASSERT_EQUAL_64(0x0, x14);
6054 }
6055 }
6056
6057
6058 TEST(csel_imm) {
6059 SETUP();
6060
6061 int values[] = {-123, -2, -1, 0, 1, 2, 123};
6062 int n_values = sizeof(values) / sizeof(values[0]);
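// The csel instruction itself only takes register operands; the
// MacroAssembler is expected to synthesise the immediate forms below, for
// example by substituting the zero register for 0 or materialising other
// values into a scratch register.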
6063
6064 for (int i = 0; i < n_values; i++) {
6065 for (int j = 0; j < n_values; j++) {
6066 int left = values[i];
6067 int right = values[j];
6068
6069 START();
6070 __ Mov(x10, 0);
6071 __ Cmp(x10, 0);
6072 __ Csel(w0, left, right, eq);
6073 __ Csel(w1, left, right, ne);
6074 __ Csel(x2, left, right, eq);
6075 __ Csel(x3, left, right, ne);
6076
6077 END();
6078
6079 if (CAN_RUN()) {
6080 RUN();
6081
6082 ASSERT_EQUAL_32(left, w0);
6083 ASSERT_EQUAL_32(right, w1);
6084 ASSERT_EQUAL_64(left, x2);
6085 ASSERT_EQUAL_64(right, x3);
6086 }
6087 }
6088 }
6089 }
6090
6091
6092 TEST(csel_mixed) {
6093 SETUP();
6094
6095 START();
6096 __ Mov(x18, 0);
6097 __ Mov(x19, 0x80000000);
6098 __ Mov(x20, 0x8000000000000000);
6099
6100 __ Cmp(x18, Operand(0));
6101 __ Csel(w0, w19, -2, ne);
6102 __ Csel(w1, w19, -1, ne);
6103 __ Csel(w2, w19, 0, ne);
6104 __ Csel(w3, w19, 1, ne);
6105 __ Csel(w4, w19, 2, ne);
6106 __ Csel(w5, w19, Operand(w19, ASR, 31), ne);
6107 __ Csel(w6, w19, Operand(w19, ROR, 1), ne);
6108 __ Csel(w7, w19, 3, eq);
6109
6110 __ Csel(x8, x20, -2, ne);
6111 __ Csel(x9, x20, -1, ne);
6112 __ Csel(x10, x20, 0, ne);
6113 __ Csel(x11, x20, 1, ne);
6114 __ Csel(x12, x20, 2, ne);
6115 __ Csel(x13, x20, Operand(x20, ASR, 63), ne);
6116 __ Csel(x14, x20, Operand(x20, ROR, 1), ne);
6117 __ Csel(x15, x20, 3, eq);
6118
6119 END();
6120
6121 if (CAN_RUN()) {
6122 RUN();
6123
6124 ASSERT_EQUAL_32(-2, w0);
6125 ASSERT_EQUAL_32(-1, w1);
6126 ASSERT_EQUAL_32(0, w2);
6127 ASSERT_EQUAL_32(1, w3);
6128 ASSERT_EQUAL_32(2, w4);
6129 ASSERT_EQUAL_32(-1, w5);
6130 ASSERT_EQUAL_32(0x40000000, w6);
6131 ASSERT_EQUAL_32(0x80000000, w7);
6132
6133 ASSERT_EQUAL_64(-2, x8);
6134 ASSERT_EQUAL_64(-1, x9);
6135 ASSERT_EQUAL_64(0, x10);
6136 ASSERT_EQUAL_64(1, x11);
6137 ASSERT_EQUAL_64(2, x12);
6138 ASSERT_EQUAL_64(-1, x13);
6139 ASSERT_EQUAL_64(0x4000000000000000, x14);
6140 ASSERT_EQUAL_64(0x8000000000000000, x15);
6141 }
6142 }
6143
6144
6145 TEST(lslv) {
6146 SETUP();
6147
6148 uint64_t value = 0x0123456789abcdef;
6149 int shift[] = {1, 3, 5, 9, 17, 33};
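// Variable shifts use only the low bits of the shift register: the amount is
// taken modulo 64 for X registers and modulo 32 for W registers, hence the
// '& 63' and '& 31' masks in the expected results below.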
6150
6151 START();
6152 __ Mov(x0, value);
6153 __ Mov(w1, shift[0]);
6154 __ Mov(w2, shift[1]);
6155 __ Mov(w3, shift[2]);
6156 __ Mov(w4, shift[3]);
6157 __ Mov(w5, shift[4]);
6158 __ Mov(w6, shift[5]);
6159
6160 // The MacroAssembler does not allow zr as an argument.
6161 {
6162 ExactAssemblyScope scope(&masm, kInstructionSize);
6163 __ lslv(x0, x0, xzr);
6164 }
6165
6166 __ Lsl(x16, x0, x1);
6167 __ Lsl(x17, x0, x2);
6168 __ Lsl(x18, x0, x3);
6169 __ Lsl(x19, x0, x4);
6170 __ Lsl(x20, x0, x5);
6171 __ Lsl(x21, x0, x6);
6172
6173 __ Lsl(w22, w0, w1);
6174 __ Lsl(w23, w0, w2);
6175 __ Lsl(w24, w0, w3);
6176 __ Lsl(w25, w0, w4);
6177 __ Lsl(w26, w0, w5);
6178 __ Lsl(w27, w0, w6);
6179 END();
6180
6181 if (CAN_RUN()) {
6182 RUN();
6183
6184 ASSERT_EQUAL_64(value, x0);
6185 ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
6186 ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
6187 ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
6188 ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
6189 ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
6190 ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
6191 ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
6192 ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
6193 ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
6194 ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
6195 ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
6196 ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
6197 }
6198 }
6199
6200
6201 TEST(lsrv) {
6202 SETUP();
6203
6204 uint64_t value = 0x0123456789abcdef;
6205 int shift[] = {1, 3, 5, 9, 17, 33};
6206
6207 START();
6208 __ Mov(x0, value);
6209 __ Mov(w1, shift[0]);
6210 __ Mov(w2, shift[1]);
6211 __ Mov(w3, shift[2]);
6212 __ Mov(w4, shift[3]);
6213 __ Mov(w5, shift[4]);
6214 __ Mov(w6, shift[5]);
6215
6216 // The MacroAssembler does not allow zr as an argument.
6217 {
6218 ExactAssemblyScope scope(&masm, kInstructionSize);
6219 __ lsrv(x0, x0, xzr);
6220 }
6221
6222 __ Lsr(x16, x0, x1);
6223 __ Lsr(x17, x0, x2);
6224 __ Lsr(x18, x0, x3);
6225 __ Lsr(x19, x0, x4);
6226 __ Lsr(x20, x0, x5);
6227 __ Lsr(x21, x0, x6);
6228
6229 __ Lsr(w22, w0, w1);
6230 __ Lsr(w23, w0, w2);
6231 __ Lsr(w24, w0, w3);
6232 __ Lsr(w25, w0, w4);
6233 __ Lsr(w26, w0, w5);
6234 __ Lsr(w27, w0, w6);
6235 END();
6236
6237 if (CAN_RUN()) {
6238 RUN();
6239
6240 ASSERT_EQUAL_64(value, x0);
6241 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
6242 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
6243 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
6244 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
6245 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
6246 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
6247
6248 value &= 0xffffffff;
6249 ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
6250 ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
6251 ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
6252 ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
6253 ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
6254 ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
6255 }
6256 }
6257
6258
6259 TEST(asrv) {
6260 SETUP();
6261
6262 int64_t value = 0xfedcba98fedcba98;
6263 int shift[] = {1, 3, 5, 9, 17, 33};
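// Asr shifts in copies of the sign bit, so the negative 64-bit value below
// stays negative however far it is shifted right.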
6264
6265 START();
6266 __ Mov(x0, value);
6267 __ Mov(w1, shift[0]);
6268 __ Mov(w2, shift[1]);
6269 __ Mov(w3, shift[2]);
6270 __ Mov(w4, shift[3]);
6271 __ Mov(w5, shift[4]);
6272 __ Mov(w6, shift[5]);
6273
6274 // The MacroAssembler does not allow zr as an argument.
6275 {
6276 ExactAssemblyScope scope(&masm, kInstructionSize);
6277 __ asrv(x0, x0, xzr);
6278 }
6279
6280 __ Asr(x16, x0, x1);
6281 __ Asr(x17, x0, x2);
6282 __ Asr(x18, x0, x3);
6283 __ Asr(x19, x0, x4);
6284 __ Asr(x20, x0, x5);
6285 __ Asr(x21, x0, x6);
6286
6287 __ Asr(w22, w0, w1);
6288 __ Asr(w23, w0, w2);
6289 __ Asr(w24, w0, w3);
6290 __ Asr(w25, w0, w4);
6291 __ Asr(w26, w0, w5);
6292 __ Asr(w27, w0, w6);
6293 END();
6294
6295 if (CAN_RUN()) {
6296 RUN();
6297
6298 ASSERT_EQUAL_64(value, x0);
6299 ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
6300 ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
6301 ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
6302 ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
6303 ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
6304 ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
6305
6306 int32_t value32 = static_cast<int32_t>(value & 0xffffffff);
6307 ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
6308 ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
6309 ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
6310 ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
6311 ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
6312 ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
6313 }
6314 }
6315
6316
6317 TEST(rorv) {
6318 SETUP();
6319
6320 uint64_t value = 0x0123456789abcdef;
6321 int shift[] = {4, 8, 12, 16, 24, 36};
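// Ror rotates bits out of the least-significant end and back in at the top;
// for example, rotating 0x0123456789abcdef right by 4 gives
// 0xf0123456789abcde.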
6322
6323 START();
6324 __ Mov(x0, value);
6325 __ Mov(w1, shift[0]);
6326 __ Mov(w2, shift[1]);
6327 __ Mov(w3, shift[2]);
6328 __ Mov(w4, shift[3]);
6329 __ Mov(w5, shift[4]);
6330 __ Mov(w6, shift[5]);
6331
6332 // The MacroAssembler does not allow zr as an argument.
6333 {
6334 ExactAssemblyScope scope(&masm, kInstructionSize);
6335 __ rorv(x0, x0, xzr);
6336 }
6337
6338 __ Ror(x16, x0, x1);
6339 __ Ror(x17, x0, x2);
6340 __ Ror(x18, x0, x3);
6341 __ Ror(x19, x0, x4);
6342 __ Ror(x20, x0, x5);
6343 __ Ror(x21, x0, x6);
6344
6345 __ Ror(w22, w0, w1);
6346 __ Ror(w23, w0, w2);
6347 __ Ror(w24, w0, w3);
6348 __ Ror(w25, w0, w4);
6349 __ Ror(w26, w0, w5);
6350 __ Ror(w27, w0, w6);
6351 END();
6352
6353 if (CAN_RUN()) {
6354 RUN();
6355
6356 ASSERT_EQUAL_64(value, x0);
6357 ASSERT_EQUAL_64(0xf0123456789abcde, x16);
6358 ASSERT_EQUAL_64(0xef0123456789abcd, x17);
6359 ASSERT_EQUAL_64(0xdef0123456789abc, x18);
6360 ASSERT_EQUAL_64(0xcdef0123456789ab, x19);
6361 ASSERT_EQUAL_64(0xabcdef0123456789, x20);
6362 ASSERT_EQUAL_64(0x789abcdef0123456, x21);
6363 ASSERT_EQUAL_32(0xf89abcde, w22);
6364 ASSERT_EQUAL_32(0xef89abcd, w23);
6365 ASSERT_EQUAL_32(0xdef89abc, w24);
6366 ASSERT_EQUAL_32(0xcdef89ab, w25);
6367 ASSERT_EQUAL_32(0xabcdef89, w26);
6368 ASSERT_EQUAL_32(0xf89abcde, w27);
6369 }
6370 }
6371
6372
6373 TEST(bfm) {
6374 SETUP();
6375
6376 START();
6377 __ Mov(x1, 0x0123456789abcdef);
6378
6379 __ Mov(x10, 0x8888888888888888);
6380 __ Mov(x11, 0x8888888888888888);
6381 __ Mov(x12, 0x8888888888888888);
6382 __ Mov(x13, 0x8888888888888888);
6383 __ Mov(x14, 0xffffffffffffffff);
6384 __ Mov(w20, 0x88888888);
6385 __ Mov(w21, 0x88888888);
6386
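// Bfm inserts a bitfield from the source into the destination, leaving the
// remaining destination bits unchanged. Bfi (insert), Bfxil (extract and
// insert low) and Bfc (clear) are aliases of it.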
6387 __ Bfm(x10, x1, 16, 31);
6388 __ Bfm(x11, x1, 32, 15);
6389
6390 __ Bfm(w20, w1, 16, 23);
6391 __ Bfm(w21, w1, 24, 15);
6392
6393 // Aliases.
6394 __ Bfi(x12, x1, 16, 8);
6395 __ Bfxil(x13, x1, 16, 8);
6396 __ Bfc(x14, 16, 8);
6397 END();
6398
6399 if (CAN_RUN()) {
6400 RUN();
6401
6402
6403 ASSERT_EQUAL_64(0x88888888888889ab, x10);
6404 ASSERT_EQUAL_64(0x8888cdef88888888, x11);
6405
6406 ASSERT_EQUAL_32(0x888888ab, w20);
6407 ASSERT_EQUAL_32(0x88cdef88, w21);
6408
6409 ASSERT_EQUAL_64(0x8888888888ef8888, x12);
6410 ASSERT_EQUAL_64(0x88888888888888ab, x13);
6411 ASSERT_EQUAL_64(0xffffffffff00ffff, x14);
6412 }
6413 }
6414
6415
6416 TEST(sbfm) {
6417 SETUP();
6418
6419 START();
6420 __ Mov(x1, 0x0123456789abcdef);
6421 __ Mov(x2, 0xfedcba9876543210);
6422
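// Sbfm copies a bitfield and sign-extends it; immediate Asr, Sbfiz, Sbfx and
// the Sxt* instructions below are all aliases of it.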
6423 __ Sbfm(x10, x1, 16, 31);
6424 __ Sbfm(x11, x1, 32, 15);
6425 __ Sbfm(x12, x1, 32, 47);
6426 __ Sbfm(x13, x1, 48, 35);
6427
6428 __ Sbfm(w14, w1, 16, 23);
6429 __ Sbfm(w15, w1, 24, 15);
6430 __ Sbfm(w16, w2, 16, 23);
6431 __ Sbfm(w17, w2, 24, 15);
6432
6433 // Aliases.
6434 __ Asr(x18, x1, 32);
6435 __ Asr(x19, x2, 32);
6436 __ Sbfiz(x20, x1, 8, 16);
6437 __ Sbfiz(x21, x2, 8, 16);
6438 __ Sbfx(x22, x1, 8, 16);
6439 __ Sbfx(x23, x2, 8, 16);
6440 __ Sxtb(x24, w1);
6441 __ Sxtb(x25, x2);
6442 __ Sxth(x26, w1);
6443 __ Sxth(x27, x2);
6444 __ Sxtw(x28, w1);
6445 __ Sxtw(x29, x2);
6446 END();
6447
6448 if (CAN_RUN()) {
6449 RUN();
6450
6451
6452 ASSERT_EQUAL_64(0xffffffffffff89ab, x10);
6453 ASSERT_EQUAL_64(0xffffcdef00000000, x11);
6454 ASSERT_EQUAL_64(0x0000000000004567, x12);
6455 ASSERT_EQUAL_64(0x000789abcdef0000, x13);
6456
6457 ASSERT_EQUAL_32(0xffffffab, w14);
6458 ASSERT_EQUAL_32(0xffcdef00, w15);
6459 ASSERT_EQUAL_32(0x00000054, w16);
6460 ASSERT_EQUAL_32(0x00321000, w17);
6461
6462 ASSERT_EQUAL_64(0x0000000001234567, x18);
6463 ASSERT_EQUAL_64(0xfffffffffedcba98, x19);
6464 ASSERT_EQUAL_64(0xffffffffffcdef00, x20);
6465 ASSERT_EQUAL_64(0x0000000000321000, x21);
6466 ASSERT_EQUAL_64(0xffffffffffffabcd, x22);
6467 ASSERT_EQUAL_64(0x0000000000005432, x23);
6468 ASSERT_EQUAL_64(0xffffffffffffffef, x24);
6469 ASSERT_EQUAL_64(0x0000000000000010, x25);
6470 ASSERT_EQUAL_64(0xffffffffffffcdef, x26);
6471 ASSERT_EQUAL_64(0x0000000000003210, x27);
6472 ASSERT_EQUAL_64(0xffffffff89abcdef, x28);
6473 ASSERT_EQUAL_64(0x0000000076543210, x29);
6474 }
6475 }
6476
6477
6478 TEST(ubfm) {
6479 SETUP();
6480
6481 START();
6482 __ Mov(x1, 0x0123456789abcdef);
6483 __ Mov(x2, 0xfedcba9876543210);
6484
6485 __ Mov(x10, 0x8888888888888888);
6486 __ Mov(x11, 0x8888888888888888);
6487
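// Ubfm is the zero-extending counterpart; immediate Lsl and Lsr, Ubfiz, Ubfx
// and the Uxt* helpers below all map onto it.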
6488 __ Ubfm(x10, x1, 16, 31);
6489 __ Ubfm(x11, x1, 32, 15);
6490 __ Ubfm(x12, x1, 32, 47);
6491 __ Ubfm(x13, x1, 48, 35);
6492
6493 __ Ubfm(w25, w1, 16, 23);
6494 __ Ubfm(w26, w1, 24, 15);
6495 __ Ubfm(w27, w2, 16, 23);
6496 __ Ubfm(w28, w2, 24, 15);
6497
6498 // Aliases.
6499 __ Lsl(x15, x1, 63);
6500 __ Lsl(x16, x1, 0);
6501 __ Lsr(x17, x1, 32);
6502 __ Ubfiz(x18, x1, 8, 16);
6503 __ Ubfx(x19, x1, 8, 16);
6504 __ Uxtb(x20, x1);
6505 __ Uxth(x21, x1);
6506 __ Uxtw(x22, x1);
6507 END();
6508
6509 if (CAN_RUN()) {
6510 RUN();
6511
6512 ASSERT_EQUAL_64(0x00000000000089ab, x10);
6513 ASSERT_EQUAL_64(0x0000cdef00000000, x11);
6514 ASSERT_EQUAL_64(0x0000000000004567, x12);
6515 ASSERT_EQUAL_64(0x000789abcdef0000, x13);
6516
6517 ASSERT_EQUAL_32(0x000000ab, w25);
6518 ASSERT_EQUAL_32(0x00cdef00, w26);
6519 ASSERT_EQUAL_32(0x00000054, w27);
6520 ASSERT_EQUAL_32(0x00321000, w28);
6521
6522 ASSERT_EQUAL_64(0x8000000000000000, x15);
6523 ASSERT_EQUAL_64(0x0123456789abcdef, x16);
6524 ASSERT_EQUAL_64(0x0000000001234567, x17);
6525 ASSERT_EQUAL_64(0x0000000000cdef00, x18);
6526 ASSERT_EQUAL_64(0x000000000000abcd, x19);
6527 ASSERT_EQUAL_64(0x00000000000000ef, x20);
6528 ASSERT_EQUAL_64(0x000000000000cdef, x21);
6529 ASSERT_EQUAL_64(0x0000000089abcdef, x22);
6530 }
6531 }
6532
6533
6534 TEST(extr) {
6535 SETUP();
6536
6537 START();
6538 __ Mov(x1, 0x0123456789abcdef);
6539 __ Mov(x2, 0xfedcba9876543210);
6540
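// Extr extracts a register-width field, starting at the given bit position,
// from the concatenation of the two sources; Ror with an immediate rotation
// is the alias with both sources equal.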
6541 __ Extr(w10, w1, w2, 0);
6542 __ Extr(w11, w1, w2, 1);
6543 __ Extr(x12, x2, x1, 2);
6544
6545 __ Ror(w13, w1, 0);
6546 __ Ror(w14, w2, 17);
6547 __ Ror(w15, w1, 31);
6548 __ Ror(x18, x2, 0);
6549 __ Ror(x19, x2, 1);
6550 __ Ror(x20, x1, 63);
6551 END();
6552
6553 if (CAN_RUN()) {
6554 RUN();
6555
6556 ASSERT_EQUAL_64(0x76543210, x10);
6557 ASSERT_EQUAL_64(0xbb2a1908, x11);
6558 ASSERT_EQUAL_64(0x0048d159e26af37b, x12);
6559 ASSERT_EQUAL_64(0x89abcdef, x13);
6560 ASSERT_EQUAL_64(0x19083b2a, x14);
6561 ASSERT_EQUAL_64(0x13579bdf, x15);
6562 ASSERT_EQUAL_64(0xfedcba9876543210, x18);
6563 ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908, x19);
6564 ASSERT_EQUAL_64(0x02468acf13579bde, x20);
6565 }
6566 }
6567
6568
6569 TEST(system_mrs) {
6570 SETUP();
6571
6572 START();
6573 __ Mov(w0, 0);
6574 __ Mov(w1, 1);
6575 __ Mov(w2, 0x80000000);
6576
6577 // Set the Z and C flags.
6578 __ Cmp(w0, w0);
6579 __ Mrs(x3, NZCV);
6580
6581 // Set the N flag.
6582 __ Cmp(w0, w1);
6583 __ Mrs(x4, NZCV);
6584
6585 // Set the Z, C and V flags.
6586 __ Adds(w0, w2, w2);
6587 __ Mrs(x5, NZCV);
6588
6589 // Read the default FPCR.
6590 __ Mrs(x6, FPCR);
6591 END();
6592
6593 if (CAN_RUN()) {
6594 RUN();
6595
6596 // NZCV
6597 ASSERT_EQUAL_32(ZCFlag, w3);
6598 ASSERT_EQUAL_32(NFlag, w4);
6599 ASSERT_EQUAL_32(ZCVFlag, w5);
6600
6601 // FPCR
6602 // The default FPCR on Linux-based platforms is 0.
6603 ASSERT_EQUAL_32(0, w6);
6604 }
6605 }
6606
6607 TEST(system_rng) {
6608 SETUP_WITH_FEATURES(CPUFeatures::kRNG);
6609
6610 START();
6611 // Random number.
6612 __ Mrs(x1, RNDR);
6613 // For now, assume that each generation succeeds.
6614 // TODO: Return failure occasionally.
6615 __ Mrs(x2, NZCV);
6616 __ Mrs(x3, RNDR);
6617 __ Mrs(x4, NZCV);
6618
6619 // Reseeded random number.
6620 __ Mrs(x5, RNDRRS);
6621 // For now, assume that each generation succeeds.
6622 // TODO: Return failure occasionally.
6623 __ Mrs(x6, NZCV);
6624 __ Mrs(x7, RNDRRS);
6625 __ Mrs(x8, NZCV);
6626 END();
6627
6628 if (CAN_RUN()) {
6629 RUN();
6630 // Random number generation series.
6631 // Check that random numbers have been generated and that successive values
6632 // differ, both for RNDR and for the reseeding RNDRRS.
6633 // NOTE: with a different architectural implementation, there may be a
6634 // collision.
6635 // TODO: Return failure occasionally. Set ZFlag and return UNKNOWN value.
6636 ASSERT_NOT_EQUAL_64(x1, x3);
6637 ASSERT_EQUAL_64(NoFlag, x2);
6638 ASSERT_EQUAL_64(NoFlag, x4);
6639 ASSERT_NOT_EQUAL_64(x5, x7);
6640 ASSERT_EQUAL_64(NoFlag, x6);
6641 ASSERT_EQUAL_64(NoFlag, x8);
6642 }
6643 }
6644
6645 TEST(cfinv) {
6646 SETUP_WITH_FEATURES(CPUFeatures::kFlagM);
6647
6648 START();
6649 __ Mov(w0, 1);
6650
6651 // Set the C flag.
6652 __ Cmp(w0, 0);
6653 __ Mrs(x1, NZCV);
6654
6655 // Invert the C flag.
6656 __ Cfinv();
6657 __ Mrs(x2, NZCV);
6658
6659 // Invert the C flag again.
6660 __ Cfinv();
6661 __ Mrs(x3, NZCV);
6662 END();
6663
6664 if (CAN_RUN()) {
6665 RUN();
6666
6667 ASSERT_EQUAL_32(CFlag, w1);
6668 ASSERT_EQUAL_32(NoFlag, w2);
6669 ASSERT_EQUAL_32(CFlag, w3);
6670 }
6671 }
6672
6673
6674 TEST(axflag_xaflag) {
6675 // The AXFLAG and XAFLAG instructions are designed for converting the FP
6676 // conditional flags from Arm format to an alternate format efficiently.
6677 // There are only 4 cases which are relevant for this conversion but we test
6678 // the behaviour for all 16 cases anyway. The 4 important cases are labelled
6679 // below.
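// The table index is the initial NZCV value written to the flags: bit 3 is N,
// bit 2 is Z, bit 1 is C and bit 0 is V. Index 2 (C set) therefore encodes the
// FP 'greater than' result, 3 'unordered', 6 'equal to' and 8 'less than'.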
6680 StatusFlags expected_x[16] = {NoFlag,
6681 ZFlag,
6682 CFlag, // Greater than
6683 ZFlag, // Unordered
6684 ZFlag,
6685 ZFlag,
6686 ZCFlag, // Equal to
6687 ZFlag,
6688 NoFlag, // Less than
6689 ZFlag,
6690 CFlag,
6691 ZFlag,
6692 ZFlag,
6693 ZFlag,
6694 ZCFlag,
6695 ZFlag};
6696 StatusFlags expected_a[16] = {NFlag, // Less than
6697 NFlag,
6698 CFlag, // Greater than
6699 CFlag,
6700 CVFlag, // Unordered
6701 CVFlag,
6702 ZCFlag, // Equal to
6703 ZCFlag,
6704 NFlag,
6705 NFlag,
6706 CFlag,
6707 CFlag,
6708 CVFlag,
6709 CVFlag,
6710 ZCFlag,
6711 ZCFlag};
6712
6713 for (unsigned i = 0; i < 16; i++) {
6714 SETUP_WITH_FEATURES(CPUFeatures::kAXFlag);
6715
6716 START();
6717 __ Mov(x0, i << Flags_offset);
6718 __ Msr(NZCV, x0);
6719 __ Axflag();
6720 __ Mrs(x1, NZCV);
6721 __ Msr(NZCV, x0);
6722 __ Xaflag();
6723 __ Mrs(x2, NZCV);
6724 END();
6725
6726 if (CAN_RUN()) {
6727 RUN();
6728 ASSERT_EQUAL_32(expected_x[i], w1);
6729 ASSERT_EQUAL_32(expected_a[i], w2);
6730 }
6731 }
6732 }
6733
6734
6735 TEST(system_msr) {
6736 // All FPCR fields that must be implemented: AHP, DN, FZ, RMode
6737 const uint64_t fpcr_core = (0b1 << 26) | // AHP
6738 (0b1 << 25) | // DN
6739 (0b1 << 24) | // FZ
6740 (0b11 << 22); // RMode
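// AHP selects the alternative half-precision format, DN forces default NaN
// propagation, FZ enables flush-to-zero, and RMode selects the rounding mode.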
6741
6742 SETUP();
6743
6744 START();
6745 __ Mov(w0, 0);
6746 __ Mov(w1, 0x7fffffff);
6747
6748 __ Mov(x7, 0);
6749
6750 __ Mov(x10, NVFlag);
6751 __ Cmp(w0, w0); // Set Z and C.
6752 __ Msr(NZCV, x10); // Set N and V.
6753 // The Msr should have overwritten every flag set by the Cmp.
6754 __ Cinc(x7, x7, mi); // N
6755 __ Cinc(x7, x7, ne); // !Z
6756 __ Cinc(x7, x7, lo); // !C
6757 __ Cinc(x7, x7, vs); // V
6758
6759 __ Mov(x10, ZCFlag);
6760 __ Cmn(w1, w1); // Set N and V.
6761 __ Msr(NZCV, x10); // Set Z and C.
6762 // The Msr should have overwritten every flag set by the Cmn.
6763 __ Cinc(x7, x7, pl); // !N
6764 __ Cinc(x7, x7, eq); // Z
6765 __ Cinc(x7, x7, hs); // C
6766 __ Cinc(x7, x7, vc); // !V
6767
6768 // All core FPCR fields must be writable.
6769 __ Mov(x8, fpcr_core);
6770 __ Msr(FPCR, x8);
6771 __ Mrs(x8, FPCR);
6772
6773 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
6774 // All FPCR fields that aren't `RES0`:
6775 const uint64_t fpcr_all = fpcr_core | (0b11 << 20) | // Stride
6776 (0b1 << 19) | // FZ16
6777 (0b111 << 16) | // Len
6778 (0b1 << 15) | // IDE
6779 (0b1 << 12) | // IXE
6780 (0b1 << 11) | // UFE
6781 (0b1 << 10) | // OFE
6782 (0b1 << 9) | // DZE
6783 (0b1 << 8); // IOE
6784
6785 // All FPCR fields, including optional ones. This part of the test doesn't
6786 // achieve much other than ensuring that supported fields can be cleared by
6787 // the next test.
6788 __ Mov(x9, fpcr_all);
6789 __ Msr(FPCR, x9);
6790 __ Mrs(x9, FPCR);
6791 __ And(x9, x9, fpcr_core);
6792
6793 // The undefined bits must ignore writes.
6794 // It's conceivable that a future version of the architecture could use these
6795 // fields (making this test fail), but in the meantime this is a useful test
6796 // for the simulator.
6797 __ Mov(x10, ~fpcr_all);
6798 __ Msr(FPCR, x10);
6799 __ Mrs(x10, FPCR);
6800 #endif
6801
6802 END();
6803
6804 if (CAN_RUN()) {
6805 RUN();
6806
6807 // We should have incremented x7 (from 0) exactly 8 times.
6808 ASSERT_EQUAL_64(8, x7);
6809
6810 ASSERT_EQUAL_64(fpcr_core, x8);
6811
6812 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
6813 ASSERT_EQUAL_64(fpcr_core, x9);
6814 ASSERT_EQUAL_64(0, x10);
6815 #endif
6816 }
6817 }
6818
6819
6820 TEST(system_pauth_a) {
6821 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
6822 START();
6823
6824 // Exclude x16 and x17 from the scratch register list so we can use
6825 // Pac/Autia1716 safely.
6826 UseScratchRegisterScope temps(&masm);
6827 temps.Exclude(x16, x17);
6828 temps.Include(x10, x11);
6829
6830 // Backup stack pointer.
6831 __ Mov(x20, sp);
6832
6833 // Modifiers
6834 __ Mov(x16, 0x477d469dec0b8760);
6835 __ Mov(sp, 0x477d469dec0b8760);
6836
6837 // Generate PACs using the 3 system instructions.
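// Pacia1716 signs x17 using x16 as the modifier, while Paciaz and Paciasp
// sign lr with a zero and an sp modifier respectively.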
6838 __ Mov(x17, 0x0000000012345678);
6839 __ Pacia1716();
6840 __ Mov(x0, x17);
6841
6842 __ Mov(lr, 0x0000000012345678);
6843 __ Paciaz();
6844 __ Mov(x1, lr);
6845
6846 __ Mov(lr, 0x0000000012345678);
6847 __ Paciasp();
6848 __ Mov(x2, lr);
6849
6850 // Authenticate the pointers above.
6851 __ Mov(x17, x0);
6852 __ Autia1716();
6853 __ Mov(x3, x17);
6854
6855 __ Mov(lr, x1);
6856 __ Autiaz();
6857 __ Mov(x4, lr);
6858
6859 __ Mov(lr, x2);
6860 __ Autiasp();
6861 __ Mov(x5, lr);
6862
6863 // Attempt to authenticate incorrect pointers.
6864 __ Mov(x17, x1);
6865 __ Autia1716();
6866 __ Mov(x6, x17);
6867
6868 __ Mov(lr, x0);
6869 __ Autiaz();
6870 __ Mov(x7, lr);
6871
6872 __ Mov(lr, x1);
6873 __ Autiasp();
6874 __ Mov(x8, lr);
6875
6876 // Strip the PAC code from the pointer in x0.
6877 __ Mov(lr, x0);
6878 __ Xpaclri();
6879 __ Mov(x9, lr);
6880
6881 // Restore stack pointer.
6882 __ Mov(sp, x20);
6883
6884 // Mask out just the PAC code bits.
6885 // TODO: use Simulator::CalculatePACMask in a nice way.
6886 __ And(x0, x0, 0x007f000000000000);
6887 __ And(x1, x1, 0x007f000000000000);
6888 __ And(x2, x2, 0x007f000000000000);
6889
6890 END();
6891
6892 if (CAN_RUN()) {
6893 RUN();
6894
6895 // Check PAC codes have been generated and aren't equal.
6896 // NOTE: with a different ComputePAC implementation, there may be a
6897 // collision.
6898 ASSERT_NOT_EQUAL_64(0, x0);
6899 ASSERT_NOT_EQUAL_64(0, x1);
6900 ASSERT_NOT_EQUAL_64(0, x2);
6901 ASSERT_NOT_EQUAL_64(x0, x1);
6902 ASSERT_EQUAL_64(x0, x2);
6903
6904 // Pointers correctly authenticated.
6905 ASSERT_EQUAL_64(0x0000000012345678, x3);
6906 ASSERT_EQUAL_64(0x0000000012345678, x4);
6907 ASSERT_EQUAL_64(0x0000000012345678, x5);
6908
6909 // Pointers corrupted after failing to authenticate.
6910 ASSERT_EQUAL_64(0x0020000012345678, x6);
6911 ASSERT_EQUAL_64(0x0020000012345678, x7);
6912 ASSERT_EQUAL_64(0x0020000012345678, x8);
6913
6914 // Pointer with code stripped.
6915 ASSERT_EQUAL_64(0x0000000012345678, x9);
6916 }
6917 }
6918
6919
6920 TEST(system_pauth_b) {
6921 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
6922 START();
6923
6924 // Exclude x16 and x17 from the scratch register list so we can use
6925 // Pac/Autia1716 safely.
6926 UseScratchRegisterScope temps(&masm);
6927 temps.Exclude(x16, x17);
6928 temps.Include(x10, x11);
6929
6930 // Backup stack pointer.
6931 __ Mov(x20, sp);
6932
6933 // Modifiers
6934 __ Mov(x16, 0x477d469dec0b8760);
6935 __ Mov(sp, 0x477d469dec0b8760);
6936
6937 // Generate PACs using the 3 system instructions.
6938 __ Mov(x17, 0x0000000012345678);
6939 __ Pacib1716();
6940 __ Mov(x0, x17);
6941
6942 __ Mov(lr, 0x0000000012345678);
6943 __ Pacibz();
6944 __ Mov(x1, lr);
6945
6946 __ Mov(lr, 0x0000000012345678);
6947 __ Pacibsp();
6948 __ Mov(x2, lr);
6949
6950 // Authenticate the pointers above.
6951 __ Mov(x17, x0);
6952 __ Autib1716();
6953 __ Mov(x3, x17);
6954
6955 __ Mov(lr, x1);
6956 __ Autibz();
6957 __ Mov(x4, lr);
6958
6959 __ Mov(lr, x2);
6960 __ Autibsp();
6961 __ Mov(x5, lr);
6962
6963 // Attempt to authenticate incorrect pointers.
6964 __ Mov(x17, x1);
6965 __ Autib1716();
6966 __ Mov(x6, x17);
6967
6968 __ Mov(lr, x0);
6969 __ Autibz();
6970 __ Mov(x7, lr);
6971
6972 __ Mov(lr, x1);
6973 __ Autibsp();
6974 __ Mov(x8, lr);
6975
6976 // Strip the PAC code from the pointer in x0.
6977 __ Mov(lr, x0);
6978 __ Xpaclri();
6979 __ Mov(x9, lr);
6980
6981 // Restore stack pointer.
6982 __ Mov(sp, x20);
6983
6984 // Mask out just the PAC code bits.
6985 // TODO: use Simulator::CalculatePACMask in a nice way.
6986 __ And(x0, x0, 0x007f000000000000);
6987 __ And(x1, x1, 0x007f000000000000);
6988 __ And(x2, x2, 0x007f000000000000);
6989
6990 END();
6991
6992 if (CAN_RUN()) {
6993 RUN();
6994
6995 // Check PAC codes have been generated and aren't equal.
6996 // NOTE: with a different ComputePAC implementation, there may be a
6997 // collision.
6998 ASSERT_NOT_EQUAL_64(0, x0);
6999 ASSERT_NOT_EQUAL_64(0, x1);
7000 ASSERT_NOT_EQUAL_64(0, x2);
7001 ASSERT_NOT_EQUAL_64(x0, x1);
7002 ASSERT_EQUAL_64(x0, x2);
7003
7004 // Pointers correctly authenticated.
7005 ASSERT_EQUAL_64(0x0000000012345678, x3);
7006 ASSERT_EQUAL_64(0x0000000012345678, x4);
7007 ASSERT_EQUAL_64(0x0000000012345678, x5);
7008
7009 // Pointers corrupted after failing to authenticate.
7010 ASSERT_EQUAL_64(0x0040000012345678, x6);
7011 ASSERT_EQUAL_64(0x0040000012345678, x7);
7012 ASSERT_EQUAL_64(0x0040000012345678, x8);
7013
7014 // Pointer with code stripped.
7015 ASSERT_EQUAL_64(0x0000000012345678, x9);
7016 }
7017 }
7018
7019 #ifdef VIXL_NEGATIVE_TESTING
7020 TEST(system_pauth_negative_test) {
7021 SETUP_WITH_FEATURES(CPUFeatures::kPAuth);
7022 START();
7023
7024 // Test for an assert (independent of order).
7025 MUST_FAIL_WITH_MESSAGE(__ Pacia1716(),
7026 "Assertion failed "
7027 "(!GetScratchRegisterList()->IncludesAliasOf(");
7028
7029 // Test for x16 assert.
7030 {
7031 UseScratchRegisterScope temps(&masm);
7032 temps.Exclude(x17);
7033 temps.Include(x16);
7034 MUST_FAIL_WITH_MESSAGE(__ Pacia1716(),
7035 "Assertion failed "
7036 "(!GetScratchRegisterList()->IncludesAliasOf(x16))");
7037 }
7038
7039 // Test for x17 assert.
7040 {
7041 UseScratchRegisterScope temps(&masm);
7042 temps.Exclude(x16);
7043 temps.Include(x17);
7044 MUST_FAIL_WITH_MESSAGE(__ Pacia1716(),
7045 "Assertion failed "
7046 "(!GetScratchRegisterList()->IncludesAliasOf(x17))");
7047 }
7048
7049 // Repeat first test for other 1716 instructions.
7050 MUST_FAIL_WITH_MESSAGE(__ Pacib1716(),
7051 "Assertion failed "
7052 "(!GetScratchRegisterList()->IncludesAliasOf(");
7053 MUST_FAIL_WITH_MESSAGE(__ Autia1716(),
7054 "Assertion failed "
7055 "(!GetScratchRegisterList()->IncludesAliasOf(");
7056 MUST_FAIL_WITH_MESSAGE(__ Autib1716(),
7057 "Assertion failed "
7058 "(!GetScratchRegisterList()->IncludesAliasOf(");
7059
7060 END();
7061 }
7062 #endif // VIXL_NEGATIVE_TESTING
7063
7064
7065 TEST(system) {
7066 // RegisterDump::Dump uses NEON.
7067 SETUP_WITH_FEATURES(CPUFeatures::kNEON, CPUFeatures::kRAS);
7068 RegisterDump before;
7069
7070 START();
7071 before.Dump(&masm);
7072 __ Nop();
7073 __ Esb();
7074 __ Csdb();
7075 END();
7076
7077 if (CAN_RUN()) {
7078 RUN();
7079
7080 ASSERT_EQUAL_REGISTERS(before);
7081 ASSERT_EQUAL_NZCV(before.flags_nzcv());
7082 }
7083 }
7084
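// Under BTI, an indirect branch that lands on a guarded page must target a
// BTI instruction of a compatible type: 'j' for jumps (Br), 'c' for calls
// (Blr) and 'jc' for either. This helper exercises all three landing pads,
// using the given intermediate register to reach the 'jc' target.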
7085 static void BtiHelper(Register ipreg) {
7086 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7087
7088 Label jump_target, jump_call_target, call_target, done;
7089 START();
7090 UseScratchRegisterScope temps(&masm);
7091 temps.Exclude(ipreg);
7092 __ Adr(x0, &jump_target);
7093 __ Br(x0);
7094 __ Nop();
7095 __ Bind(&jump_target, EmitBTI_j);
7096 __ Adr(x0, &call_target);
7097 __ Blr(x0);
7098 __ Adr(ipreg, &jump_call_target);
7099 __ Blr(ipreg);
7100 __ Adr(lr, &done); // Make Ret return to the 'done' label.
7101 __ Br(ipreg);
7102 __ Bind(&call_target, EmitBTI_c);
7103 __ Ret();
7104 __ Bind(&jump_call_target, EmitBTI_jc);
7105 __ Ret();
7106 __ Bind(&done);
7107 END();
7108
7109 if (CAN_RUN()) {
7110 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7111 simulator.SetGuardedPages(true);
7112 #else
7113 VIXL_UNIMPLEMENTED();
7114 #endif
7115 RUN();
7116 }
7117 }
7118
7119 TEST(bti) {
7120 BtiHelper(x16);
7121 BtiHelper(x17);
7122 }
7123
7124 TEST(unguarded_bti_is_nop) {
7125 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7126
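// BTI instructions only act as landing pads on guarded pages; on unguarded
// pages they execute as NOPs, so the indirect branches below may legitimately
// target any of them.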
7127 Label start, none, c, j, jc;
7128 START();
7129 __ B(&start);
7130 __ Bind(&none, EmitBTI);
7131 __ Bind(&c, EmitBTI_c);
7132 __ Bind(&j, EmitBTI_j);
7133 __ Bind(&jc, EmitBTI_jc);
7134 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&none) == 4 * kInstructionSize);
7135 __ Ret();
7136
7137 Label jump_to_c, call_to_j;
7138 __ Bind(&start);
7139 __ Adr(x0, &none);
7140 __ Adr(lr, &jump_to_c);
7141 __ Br(x0);
7142
7143 __ Bind(&jump_to_c);
7144 __ Adr(x0, &c);
7145 __ Adr(lr, &call_to_j);
7146 __ Br(x0);
7147
7148 __ Bind(&call_to_j);
7149 __ Adr(x0, &j);
7150 __ Blr(x0);
7151 END();
7152
7153 if (CAN_RUN()) {
7154 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7155 simulator.SetGuardedPages(false);
7156 #else
7157 VIXL_UNIMPLEMENTED();
7158 #endif
7159 RUN();
7160 }
7161 }
7162
7163 #ifdef VIXL_NEGATIVE_TESTING
7164 TEST(bti_jump_to_ip_unidentified) {
7165 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7166
7167 START();
7168 UseScratchRegisterScope temps(&masm);
7169 temps.Exclude(x17);
7170 Label l;
7171 __ Adr(x17, &l);
7172 __ Br(x17);
7173 __ Nop();
7174 __ Bind(&l);
7175 __ Nop();
7176 END();
7177
7178 if (CAN_RUN()) {
7179 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7180 simulator.SetGuardedPages(true);
7181 #else
7182 VIXL_UNIMPLEMENTED();
7183 #endif
7184 MUST_FAIL_WITH_MESSAGE(RUN(),
7185 "Executing non-BTI instruction with wrong "
7186 "BType.");
7187 }
7188 }
7189
7190 TEST(bti_jump_to_unidentified) {
7191 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7192
7193 START();
7194 Label l;
7195 __ Adr(x0, &l);
7196 __ Br(x0);
7197 __ Nop();
7198 __ Bind(&l);
7199 __ Nop();
7200 END();
7201
7202 if (CAN_RUN()) {
7203 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7204 simulator.SetGuardedPages(true);
7205 #else
7206 VIXL_UNIMPLEMENTED();
7207 #endif
7208 MUST_FAIL_WITH_MESSAGE(RUN(),
7209 "Executing non-BTI instruction with wrong "
7210 "BType.");
7211 }
7212 }
7213
7214 TEST(bti_call_to_unidentified) {
7215 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7216
7217 START();
7218 Label l;
7219 __ Adr(x0, &l);
7220 __ Blr(x0);
7221 __ Nop();
7222 __ Bind(&l);
7223 __ Nop();
7224 END();
7225
7226 if (CAN_RUN()) {
7227 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7228 simulator.SetGuardedPages(true);
7229 #else
7230 VIXL_UNIMPLEMENTED();
7231 #endif
7232 MUST_FAIL_WITH_MESSAGE(RUN(),
7233 "Executing non-BTI instruction with wrong "
7234 "BType.");
7235 }
7236 }
7237
7238 TEST(bti_jump_to_c) {
7239 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7240
7241 START();
7242 // Jumping to a "BTI c" target must fail.
7243 Label jump_target;
7244 __ Adr(x0, &jump_target);
7245 __ Br(x0);
7246 __ Nop();
7247 __ Bind(&jump_target, EmitBTI_c);
7248 __ Nop();
7249 END();
7250
7251 if (CAN_RUN()) {
7252 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7253 simulator.SetGuardedPages(true);
7254 #else
7255 VIXL_UNIMPLEMENTED();
7256 #endif
7257 MUST_FAIL_WITH_MESSAGE(RUN(), "Executing BTI c with wrong BType.");
7258 }
7259 }
7260
7261 TEST(bti_call_to_j) {
7262 SETUP_WITH_FEATURES(CPUFeatures::kBTI);
7263
7264 START();
7265 // Calling a "BTI j" target must fail.
7266 Label call_target;
7267 __ Adr(x0, &call_target);
7268 __ Blr(x0);
7269 __ Nop();
7270 __ Bind(&call_target, EmitBTI_j);
7271 __ Nop();
7272 END();
7273
7274 if (CAN_RUN()) {
7275 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7276 simulator.SetGuardedPages(true);
7277 #else
7278 VIXL_UNIMPLEMENTED();
7279 #endif
7280 MUST_FAIL_WITH_MESSAGE(RUN(), "Executing BTI j with wrong BType.");
7281 }
7282 }
7283 #endif // VIXL_NEGATIVE_TESTING
7284
7285 TEST(fall_through_bti) {
7286 SETUP_WITH_FEATURES(CPUFeatures::kBTI, CPUFeatures::kPAuth);
7287
7288 START();
7289 Label target, target_j, target_c, target_jc;
7290 __ Mov(x0, 0); // 'Normal' instruction sets BTYPE to zero.
7291 __ Bind(&target, EmitBTI);
7292 __ Add(x0, x0, 1);
7293 __ Bind(&target_j, EmitBTI_j);
7294 __ Add(x0, x0, 1);
7295 __ Bind(&target_c, EmitBTI_c);
7296 __ Add(x0, x0, 1);
7297 __ Bind(&target_jc, EmitBTI_jc);
7298 __ Add(x0, x0, 1);
7299 __ Paciasp();
7300 END();
7301
7302 if (CAN_RUN()) {
7303 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
7304 simulator.SetGuardedPages(true);
7305 #else
7306 VIXL_UNIMPLEMENTED();
7307 #endif
7308 RUN();
7309
7310 ASSERT_EQUAL_64(4, x0);
7311 }
7312 }
7313
7314 TEST(zero_dest) {
7315 // RegisterDump::Dump uses NEON.
7316 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
7317 RegisterDump before;
7318
7319 START();
7320 // Preserve the stack pointer, in case we clobber it.
7321 __ Mov(x30, sp);
7322 // Initialize the other registers used in this test.
7323 uint64_t literal_base = 0x0100001000100101;
7324 __ Mov(x0, 0);
7325 __ Mov(x1, literal_base);
7326 for (unsigned i = 2; i < x30.GetCode(); i++) {
7327 __ Add(XRegister(i), XRegister(i - 1), x1);
7328 }
7329 before.Dump(&masm);
7330
7331 // All of these instructions should be NOPs in these forms, but have
7332 // alternate forms which can write into the stack pointer.
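// Writing to xzr simply discards the result; the related encodings that
// treat register 31 as sp would update the stack pointer instead.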
7333 {
7334 ExactAssemblyScope scope(&masm, 3 * 7 * kInstructionSize);
7335 __ add(xzr, x0, x1);
7336 __ add(xzr, x1, xzr);
7337 __ add(xzr, xzr, x1);
7338
7339 __ and_(xzr, x0, x2);
7340 __ and_(xzr, x2, xzr);
7341 __ and_(xzr, xzr, x2);
7342
7343 __ bic(xzr, x0, x3);
7344 __ bic(xzr, x3, xzr);
7345 __ bic(xzr, xzr, x3);
7346
7347 __ eon(xzr, x0, x4);
7348 __ eon(xzr, x4, xzr);
7349 __ eon(xzr, xzr, x4);
7350
7351 __ eor(xzr, x0, x5);
7352 __ eor(xzr, x5, xzr);
7353 __ eor(xzr, xzr, x5);
7354
7355 __ orr(xzr, x0, x6);
7356 __ orr(xzr, x6, xzr);
7357 __ orr(xzr, xzr, x6);
7358
7359 __ sub(xzr, x0, x7);
7360 __ sub(xzr, x7, xzr);
7361 __ sub(xzr, xzr, x7);
7362 }
7363
7364 // Swap the saved stack pointer with the real one. If sp was written
7365 // during the test, it will show up in x30. This is done because the test
7366 // framework assumes that sp will be valid at the end of the test.
7367 __ Mov(x29, x30);
7368 __ Mov(x30, sp);
7369 __ Mov(sp, x29);
7370 // We used x29 as a scratch register, so reset it to make sure it doesn't
7371 // trigger a test failure.
7372 __ Add(x29, x28, x1);
7373 END();
7374
7375 if (CAN_RUN()) {
7376 RUN();
7377
7378 ASSERT_EQUAL_REGISTERS(before);
7379 ASSERT_EQUAL_NZCV(before.flags_nzcv());
7380 }
7381 }
7382
7383
7384 TEST(zero_dest_setflags) {
7385 // RegisterDump::Dump uses NEON.
7386 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
7387 RegisterDump before;
7388
7389 START();
7390 // Preserve the stack pointer, in case we clobber it.
7391 __ Mov(x30, sp);
7392 // Initialize the other registers used in this test.
7393 uint64_t literal_base = 0x0100001000100101;
7394 __ Mov(x0, 0);
7395 __ Mov(x1, literal_base);
7396 for (int i = 2; i < 30; i++) {
7397 __ Add(XRegister(i), XRegister(i - 1), x1);
7398 }
7399 before.Dump(&masm);
7400
7401 // All of these instructions should only write to the flags in these forms,
7402 // but have alternate forms which can write into the stack pointer.
7403 {
7404 ExactAssemblyScope scope(&masm, 6 * kInstructionSize);
7405 __ adds(xzr, x0, Operand(x1, UXTX));
7406 __ adds(xzr, x1, Operand(xzr, UXTX));
7407 __ adds(xzr, x1, 1234);
7408 __ adds(xzr, x0, x1);
7409 __ adds(xzr, x1, xzr);
7410 __ adds(xzr, xzr, x1);
7411 }
7412
7413 {
7414 ExactAssemblyScope scope(&masm, 5 * kInstructionSize);
7415 __ ands(xzr, x2, ~0xf);
7416 __ ands(xzr, xzr, ~0xf);
7417 __ ands(xzr, x0, x2);
7418 __ ands(xzr, x2, xzr);
7419 __ ands(xzr, xzr, x2);
7420 }
7421
7422 {
7423 ExactAssemblyScope scope(&masm, 5 * kInstructionSize);
7424 __ bics(xzr, x3, ~0xf);
7425 __ bics(xzr, xzr, ~0xf);
7426 __ bics(xzr, x0, x3);
7427 __ bics(xzr, x3, xzr);
7428 __ bics(xzr, xzr, x3);
7429 }
7430
7431 {
7432 ExactAssemblyScope scope(&masm, 6 * kInstructionSize);
7433 __ subs(xzr, x0, Operand(x3, UXTX));
7434 __ subs(xzr, x3, Operand(xzr, UXTX));
7435 __ subs(xzr, x3, 1234);
7436 __ subs(xzr, x0, x3);
7437 __ subs(xzr, x3, xzr);
7438 __ subs(xzr, xzr, x3);
7439 }
7440
7441 // Swap the saved stack pointer with the real one. If sp was written
7442 // during the test, it will show up in x30. This is done because the test
7443 // framework assumes that sp will be valid at the end of the test.
7444 __ Mov(x29, x30);
7445 __ Mov(x30, sp);
7446 __ Mov(sp, x29);
7447 // We used x29 as a scratch register, so reset it to make sure it doesn't
7448 // trigger a test failure.
7449 __ Add(x29, x28, x1);
7450 END();
7451
7452 if (CAN_RUN()) {
7453 RUN();
7454
7455 ASSERT_EQUAL_REGISTERS(before);
7456 }
7457 }
7458
7459
7460 TEST(stack_pointer_override) {
7461 // This test generates some stack maintenance code, but it only checks the
7462 // reported state.
7463 SETUP();
7464 START();
7465
7466 // The default stack pointer in VIXL is sp.
7467 VIXL_CHECK(sp.Is(__ StackPointer()));
7468 __ SetStackPointer(x0);
7469 VIXL_CHECK(x0.Is(__ StackPointer()));
7470 __ SetStackPointer(x28);
7471 VIXL_CHECK(x28.Is(__ StackPointer()));
7472 __ SetStackPointer(sp);
7473 VIXL_CHECK(sp.Is(__ StackPointer()));
7474
7475 END();
7476 if (CAN_RUN()) {
7477 RUN();
7478 }
7479 }
7480
7481
7482 TEST(peek_poke_simple) {
7483 SETUP();
7484 START();
7485
7486 static const RegList x0_to_x3 =
7487 x0.GetBit() | x1.GetBit() | x2.GetBit() | x3.GetBit();
7488 static const RegList x10_to_x13 =
7489 x10.GetBit() | x11.GetBit() | x12.GetBit() | x13.GetBit();
7490
7491 // The literal base is chosen to have two useful properties:
7492 // * When multiplied by small values (such as a register index), this value
7493 // is clearly readable in the result.
7494 // * The value is not formed from repeating fixed-size smaller values, so it
7495 // can be used to detect endianness-related errors.
7496 uint64_t literal_base = 0x0100001000100101;
7497
7498 // Initialize the registers.
7499 __ Mov(x0, literal_base);
7500 __ Add(x1, x0, x0);
7501 __ Add(x2, x1, x0);
7502 __ Add(x3, x2, x0);
7503
7504 __ Claim(32);
7505
7506 // Simple exchange.
7507 // After this test:
7508 // x0-x3 should be unchanged.
7509 // w10-w13 should contain the lower words of x0-x3.
7510 __ Poke(x0, 0);
7511 __ Poke(x1, 8);
7512 __ Poke(x2, 16);
7513 __ Poke(x3, 24);
7514 Clobber(&masm, x0_to_x3);
7515 __ Peek(x0, 0);
7516 __ Peek(x1, 8);
7517 __ Peek(x2, 16);
7518 __ Peek(x3, 24);
7519
7520 __ Poke(w0, 0);
7521 __ Poke(w1, 4);
7522 __ Poke(w2, 8);
7523 __ Poke(w3, 12);
7524 Clobber(&masm, x10_to_x13);
7525 __ Peek(w10, 0);
7526 __ Peek(w11, 4);
7527 __ Peek(w12, 8);
7528 __ Peek(w13, 12);
7529
7530 __ Drop(32);
7531
7532 END();
7533 if (CAN_RUN()) {
7534 RUN();
7535
7536 ASSERT_EQUAL_64(literal_base * 1, x0);
7537 ASSERT_EQUAL_64(literal_base * 2, x1);
7538 ASSERT_EQUAL_64(literal_base * 3, x2);
7539 ASSERT_EQUAL_64(literal_base * 4, x3);
7540
7541 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
7542 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
7543 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
7544 ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
7545 }
7546 }
7547
7548
7549 TEST(peek_poke_unaligned) {
7550 SETUP();
7551 START();
7552
7553 // The literal base is chosen to have two useful properties:
7554 // * When multiplied by small values (such as a register index), this value
7555 // is clearly readable in the result.
7556 // * The value is not formed from repeating fixed-size smaller values, so it
7557 // can be used to detect endianness-related errors.
7558 uint64_t literal_base = 0x0100001000100101;
7559
7560 // Initialize the registers.
7561 __ Mov(x0, literal_base);
7562 __ Add(x1, x0, x0);
7563 __ Add(x2, x1, x0);
7564 __ Add(x3, x2, x0);
7565 __ Add(x4, x3, x0);
7566 __ Add(x5, x4, x0);
7567 __ Add(x6, x5, x0);
7568
7569 __ Claim(32);
7570
7571 // Unaligned exchanges.
7572 // After this test:
7573 // x0-x6 should be unchanged.
7574 // w10-w12 should contain the lower words of x0-x2.
7575 __ Poke(x0, 1);
7576 Clobber(&masm, x0.GetBit());
7577 __ Peek(x0, 1);
7578 __ Poke(x1, 2);
7579 Clobber(&masm, x1.GetBit());
7580 __ Peek(x1, 2);
7581 __ Poke(x2, 3);
7582 Clobber(&masm, x2.GetBit());
7583 __ Peek(x2, 3);
7584 __ Poke(x3, 4);
7585 Clobber(&masm, x3.GetBit());
7586 __ Peek(x3, 4);
7587 __ Poke(x4, 5);
7588 Clobber(&masm, x4.GetBit());
7589 __ Peek(x4, 5);
7590 __ Poke(x5, 6);
7591 Clobber(&masm, x5.GetBit());
7592 __ Peek(x5, 6);
7593 __ Poke(x6, 7);
7594 Clobber(&masm, x6.GetBit());
7595 __ Peek(x6, 7);
7596
7597 __ Poke(w0, 1);
7598 Clobber(&masm, w10.GetBit());
7599 __ Peek(w10, 1);
7600 __ Poke(w1, 2);
7601 Clobber(&masm, w11.GetBit());
7602 __ Peek(w11, 2);
7603 __ Poke(w2, 3);
7604 Clobber(&masm, w12.GetBit());
7605 __ Peek(w12, 3);
7606
7607 __ Drop(32);
7608
7609 END();
7610 if (CAN_RUN()) {
7611 RUN();
7612
7613 ASSERT_EQUAL_64(literal_base * 1, x0);
7614 ASSERT_EQUAL_64(literal_base * 2, x1);
7615 ASSERT_EQUAL_64(literal_base * 3, x2);
7616 ASSERT_EQUAL_64(literal_base * 4, x3);
7617 ASSERT_EQUAL_64(literal_base * 5, x4);
7618 ASSERT_EQUAL_64(literal_base * 6, x5);
7619 ASSERT_EQUAL_64(literal_base * 7, x6);
7620
7621 ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
7622 ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
7623 ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
7624 }
7625 }
7626
7627
7628 TEST(peek_poke_endianness) {
7629 SETUP();
7630 START();
7631
7632 // The literal base is chosen to have two useful properties:
7633 // * When multiplied by small values (such as a register index), this value
7634 // is clearly readable in the result.
7635 // * The value is not formed from repeating fixed-size smaller values, so it
7636 // can be used to detect endianness-related errors.
7637 uint64_t literal_base = 0x0100001000100101;
7638
7639 // Initialize the registers.
7640 __ Mov(x0, literal_base);
7641 __ Add(x1, x0, x0);
7642
7643 __ Claim(32);
7644
7645 // Endianness tests.
7646 // After this section:
7647 // x4 should match x0[31:0]:x0[63:32]
7648 // w5 should match w1[15:0]:w1[31:16]
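// Poking x0 at offsets 0 and 8 and then peeking eight bytes from offset 4
// straddles the two copies, so (on a little-endian target) the result has
// x0's high word in its low half and x0's low word in its high half.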
7649 __ Poke(x0, 0);
7650 __ Poke(x0, 8);
7651 __ Peek(x4, 4);
7652
7653 __ Poke(w1, 0);
7654 __ Poke(w1, 4);
7655 __ Peek(w5, 2);
7656
7657 __ Drop(32);
7658
7659 END();
7660 if (CAN_RUN()) {
7661 RUN();
7662
7663 uint64_t x0_expected = literal_base * 1;
7664 uint64_t x1_expected = literal_base * 2;
7665 uint64_t x4_expected = (x0_expected << 32) | (x0_expected >> 32);
7666 uint64_t x5_expected =
7667 ((x1_expected << 16) & 0xffff0000) | ((x1_expected >> 16) & 0x0000ffff);
7668
7669 ASSERT_EQUAL_64(x0_expected, x0);
7670 ASSERT_EQUAL_64(x1_expected, x1);
7671 ASSERT_EQUAL_64(x4_expected, x4);
7672 ASSERT_EQUAL_64(x5_expected, x5);
7673 }
7674 }
7675
7676
7677 TEST(peek_poke_mixed) {
7678 SETUP();
7679 START();
7680
7681 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
7682 UseScratchRegisterScope temps(&masm);
7683 temps.ExcludeAll();
7684
7685 // The literal base is chosen to have two useful properties:
7686 // * When multiplied by small values (such as a register index), this value
7687 // is clearly readable in the result.
7688 // * The value is not formed from repeating fixed-size smaller values, so it
7689 // can be used to detect endianness-related errors.
7690 uint64_t literal_base = 0x0100001000100101;
7691
7692 // Initialize the registers.
7693 __ Mov(x0, literal_base);
7694 __ Add(x1, x0, x0);
7695 __ Add(x2, x1, x0);
7696 __ Add(x3, x2, x0);
7697
7698 __ Claim(32);
7699
7700 // Mix with other stack operations.
7701 // After this section:
7702 // x0-x3 should be unchanged.
7703 // x6 should match x1[31:0]:x0[63:32]
7704 // w7 should match x1[15:0]:x0[63:48]
7705 __ Poke(x1, 8);
7706 __ Poke(x0, 0);
7707 {
7708 VIXL_ASSERT(__ StackPointer().Is(sp));
7709 __ Mov(x4, __ StackPointer());
7710 __ SetStackPointer(x4);
7711
7712 __ Poke(wzr, 0); // Clobber the space we're about to drop.
7713 __ Drop(4);
7714 __ Peek(x6, 0);
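// The four-byte drop above means this Peek reads the top half of the poked
// x0 and the bottom half of x1, giving x1[31:0]:x0[63:32] on a little-endian
// target.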
7715 __ Claim(8);
7716 __ Peek(w7, 10);
7717 __ Poke(x3, 28);
7718 __ Poke(xzr, 0); // Clobber the space we're about to drop.
7719 __ Drop(8);
7720 __ Poke(x2, 12);
7721 __ Push(w0);
7722
7723 __ Mov(sp, __ StackPointer());
7724 __ SetStackPointer(sp);
7725 }
7726
7727 __ Pop(x0, x1, x2, x3);
7728
7729 END();
7730 if (CAN_RUN()) {
7731 RUN();
7732
7733 uint64_t x0_expected = literal_base * 1;
7734 uint64_t x1_expected = literal_base * 2;
7735 uint64_t x2_expected = literal_base * 3;
7736 uint64_t x3_expected = literal_base * 4;
7737 uint64_t x6_expected = (x1_expected << 32) | (x0_expected >> 32);
7738 uint64_t x7_expected =
7739 ((x1_expected << 16) & 0xffff0000) | ((x0_expected >> 48) & 0x0000ffff);
7740
7741 ASSERT_EQUAL_64(x0_expected, x0);
7742 ASSERT_EQUAL_64(x1_expected, x1);
7743 ASSERT_EQUAL_64(x2_expected, x2);
7744 ASSERT_EQUAL_64(x3_expected, x3);
7745 ASSERT_EQUAL_64(x6_expected, x6);
7746 ASSERT_EQUAL_64(x7_expected, x7);
7747 }
7748 }
7749
7750
7751 TEST(peek_poke_reglist) {
7752 SETUP_WITH_FEATURES(CPUFeatures::kFP);
7753
7754 START();
7755
7756 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
7757 UseScratchRegisterScope temps(&masm);
7758 temps.ExcludeAll();
7759
7760 // The literal base is chosen to have two useful properties:
7761 // * When multiplied by small values (such as a register index), this value
7762 // is clearly readable in the result.
7763 // * The value is not formed from repeating fixed-size smaller values, so it
7764 // can be used to detect endianness-related errors.
7765 uint64_t base = 0x0100001000100101;
7766
7767 // Initialize the registers.
7768 __ Mov(x1, base);
7769 __ Add(x2, x1, x1);
7770 __ Add(x3, x2, x1);
7771 __ Add(x4, x3, x1);
7772
7773 CPURegList list_1(x1, x2, x3, x4);
7774 CPURegList list_2(x11, x12, x13, x14);
7775 int list_1_size = list_1.GetTotalSizeInBytes();
7776
7777 __ Claim(2 * list_1_size);
7778
7779 __ PokeCPURegList(list_1, 0);
7780 __ PokeXRegList(list_1.GetList(), list_1_size);
7781 __ PeekCPURegList(list_2, 2 * kXRegSizeInBytes);
7782 __ PeekXRegList(x15.GetBit(), kWRegSizeInBytes);
7783 __ PeekWRegList(w16.GetBit() | w17.GetBit(), 3 * kXRegSizeInBytes);
7784
7785 __ Drop(2 * list_1_size);
7786
7787
7788 uint64_t base_d = 0x1010010001000010;
7789
7790 // Initialize the registers.
7791 __ Mov(x1, base_d);
7792 __ Add(x2, x1, x1);
7793 __ Add(x3, x2, x1);
7794 __ Add(x4, x3, x1);
7795 __ Fmov(d1, x1);
7796 __ Fmov(d2, x2);
7797 __ Fmov(d3, x3);
7798 __ Fmov(d4, x4);
7799
7800 CPURegList list_d_1(d1, d2, d3, d4);
7801 CPURegList list_d_2(d11, d12, d13, d14);
7802 int list_d_1_size = list_d_1.GetTotalSizeInBytes();
7803
7804 __ Claim(2 * list_d_1_size);
7805
7806 __ PokeCPURegList(list_d_1, 0);
7807 __ PokeDRegList(list_d_1.GetList(), list_d_1_size);
7808 __ PeekCPURegList(list_d_2, 2 * kDRegSizeInBytes);
7809 __ PeekDRegList(d15.GetBit(), kSRegSizeInBytes);
7810 __ PeekSRegList(s16.GetBit() | s17.GetBit(), 3 * kDRegSizeInBytes);
7811
7812 __ Drop(2 * list_d_1_size);
7813
7814
7815 END();
7816 if (CAN_RUN()) {
7817 RUN();
7818
7819 ASSERT_EQUAL_64(3 * base, x11);
7820 ASSERT_EQUAL_64(4 * base, x12);
7821 ASSERT_EQUAL_64(1 * base, x13);
7822 ASSERT_EQUAL_64(2 * base, x14);
7823 ASSERT_EQUAL_64(((1 * base) >> kWRegSize) | ((2 * base) << kWRegSize), x15);
7824 ASSERT_EQUAL_64(2 * base, x14);
7825 ASSERT_EQUAL_32((4 * base) & kWRegMask, w16);
7826 ASSERT_EQUAL_32((4 * base) >> kWRegSize, w17);
7827
7828 ASSERT_EQUAL_FP64(RawbitsToDouble(3 * base_d), d11);
7829 ASSERT_EQUAL_FP64(RawbitsToDouble(4 * base_d), d12);
7830 ASSERT_EQUAL_FP64(RawbitsToDouble(1 * base_d), d13);
7831 ASSERT_EQUAL_FP64(RawbitsToDouble(2 * base_d), d14);
7832 ASSERT_EQUAL_FP64(RawbitsToDouble((base_d >> kSRegSize) |
7833 ((2 * base_d) << kSRegSize)),
7834 d15);
7835 ASSERT_EQUAL_FP64(RawbitsToDouble(2 * base_d), d14);
7836 ASSERT_EQUAL_FP32(RawbitsToFloat((4 * base_d) & kSRegMask), s16);
7837 ASSERT_EQUAL_FP32(RawbitsToFloat((4 * base_d) >> kSRegSize), s17);
7838 }
7839 }
7840
7841
7842 TEST(load_store_reglist) {
7843 SETUP_WITH_FEATURES(CPUFeatures::kFP);
7844
7845 START();
7846
7847 // The literal base is chosen to have two useful properties:
7848 // * When multiplied by small values (such as a register index), this value
7849 // is clearly readable in the result.
7850 // * The value is not formed from repeating fixed-size smaller values, so it
7851 // can be used to detect endianness-related errors.
7852 uint64_t high_base = UINT32_C(0x01000010);
7853 uint64_t low_base = UINT32_C(0x00100101);
7854 uint64_t base = (high_base << 32) | low_base;
7855 uint64_t array[21];
7856 memset(array, 0, sizeof(array));
7857
7858 // Initialize the registers.
7859 __ Mov(x1, base);
7860 __ Add(x2, x1, x1);
7861 __ Add(x3, x2, x1);
7862 __ Add(x4, x3, x1);
7863 __ Fmov(d1, x1);
7864 __ Fmov(d2, x2);
7865 __ Fmov(d3, x3);
7866 __ Fmov(d4, x4);
7867 __ Fmov(d5, x1);
7868 __ Fmov(d6, x2);
7869 __ Fmov(d7, x3);
7870 __ Fmov(d8, x4);
7871
7872 Register reg_base = x20;
7873 Register reg_index = x21;
7874 int size_stored = 0;
7875
7876 __ Mov(reg_base, reinterpret_cast<uintptr_t>(&array));
7877
7878 // Test aligned accesses.
7879 CPURegList list_src(w1, w2, w3, w4);
7880 CPURegList list_dst(w11, w12, w13, w14);
7881 CPURegList list_fp_src_1(d1, d2, d3, d4);
7882 CPURegList list_fp_dst_1(d11, d12, d13, d14);
7883
7884 __ StoreCPURegList(list_src, MemOperand(reg_base, 0 * sizeof(uint64_t)));
7885 __ LoadCPURegList(list_dst, MemOperand(reg_base, 0 * sizeof(uint64_t)));
7886 size_stored += 4 * kWRegSizeInBytes;
7887
7888 __ Mov(reg_index, size_stored);
7889 __ StoreCPURegList(list_src, MemOperand(reg_base, reg_index));
7890 __ LoadCPURegList(list_dst, MemOperand(reg_base, reg_index));
7891 size_stored += 4 * kWRegSizeInBytes;
7892
7893 __ StoreCPURegList(list_fp_src_1, MemOperand(reg_base, size_stored));
7894 __ LoadCPURegList(list_fp_dst_1, MemOperand(reg_base, size_stored));
7895 size_stored += 4 * kDRegSizeInBytes;
7896
7897 __ Mov(reg_index, size_stored);
7898 __ StoreCPURegList(list_fp_src_1, MemOperand(reg_base, reg_index));
7899 __ LoadCPURegList(list_fp_dst_1, MemOperand(reg_base, reg_index));
7900 size_stored += 4 * kDRegSizeInBytes;
7901
7902 // Test unaligned accesses.
7903 CPURegList list_fp_src_2(d5, d6, d7, d8);
7904 CPURegList list_fp_dst_2(d15, d16, d17, d18);
7905
7906 __ Str(wzr, MemOperand(reg_base, size_stored));
7907 size_stored += 1 * kWRegSizeInBytes;
7908 __ StoreCPURegList(list_fp_src_2, MemOperand(reg_base, size_stored));
7909 __ LoadCPURegList(list_fp_dst_2, MemOperand(reg_base, size_stored));
7910 size_stored += 4 * kDRegSizeInBytes;
7911
7912 __ Mov(reg_index, size_stored);
7913 __ StoreCPURegList(list_fp_src_2, MemOperand(reg_base, reg_index));
7914 __ LoadCPURegList(list_fp_dst_2, MemOperand(reg_base, reg_index));
7915
7916 END();
7917 if (CAN_RUN()) {
7918 RUN();
7919
7920 VIXL_CHECK(array[0] == (1 * low_base) + (2 * low_base << kWRegSize));
7921 VIXL_CHECK(array[1] == (3 * low_base) + (4 * low_base << kWRegSize));
7922 VIXL_CHECK(array[2] == (1 * low_base) + (2 * low_base << kWRegSize));
7923 VIXL_CHECK(array[3] == (3 * low_base) + (4 * low_base << kWRegSize));
7924 VIXL_CHECK(array[4] == 1 * base);
7925 VIXL_CHECK(array[5] == 2 * base);
7926 VIXL_CHECK(array[6] == 3 * base);
7927 VIXL_CHECK(array[7] == 4 * base);
7928 VIXL_CHECK(array[8] == 1 * base);
7929 VIXL_CHECK(array[9] == 2 * base);
7930 VIXL_CHECK(array[10] == 3 * base);
7931 VIXL_CHECK(array[11] == 4 * base);
7932 VIXL_CHECK(array[12] == ((1 * low_base) << kSRegSize));
7933 VIXL_CHECK(array[13] == (((2 * low_base) << kSRegSize) | (1 * high_base)));
7934 VIXL_CHECK(array[14] == (((3 * low_base) << kSRegSize) | (2 * high_base)));
7935 VIXL_CHECK(array[15] == (((4 * low_base) << kSRegSize) | (3 * high_base)));
7936 VIXL_CHECK(array[16] == (((1 * low_base) << kSRegSize) | (4 * high_base)));
7937 VIXL_CHECK(array[17] == (((2 * low_base) << kSRegSize) | (1 * high_base)));
7938 VIXL_CHECK(array[18] == (((3 * low_base) << kSRegSize) | (2 * high_base)));
7939 VIXL_CHECK(array[19] == (((4 * low_base) << kSRegSize) | (3 * high_base)));
7940 VIXL_CHECK(array[20] == (4 * high_base));
7941
7942 ASSERT_EQUAL_64(1 * low_base, x11);
7943 ASSERT_EQUAL_64(2 * low_base, x12);
7944 ASSERT_EQUAL_64(3 * low_base, x13);
7945 ASSERT_EQUAL_64(4 * low_base, x14);
7946 ASSERT_EQUAL_FP64(RawbitsToDouble(1 * base), d11);
7947 ASSERT_EQUAL_FP64(RawbitsToDouble(2 * base), d12);
7948 ASSERT_EQUAL_FP64(RawbitsToDouble(3 * base), d13);
7949 ASSERT_EQUAL_FP64(RawbitsToDouble(4 * base), d14);
7950 ASSERT_EQUAL_FP64(RawbitsToDouble(1 * base), d15);
7951 ASSERT_EQUAL_FP64(RawbitsToDouble(2 * base), d16);
7952 ASSERT_EQUAL_FP64(RawbitsToDouble(3 * base), d17);
7953 ASSERT_EQUAL_FP64(RawbitsToDouble(4 * base), d18);
7954 }
7955 }
7956
7957
7958 // This enum is used only as an argument to the push-pop test helpers.
7959 enum PushPopMethod {
7960 // Push or Pop using the Push and Pop methods, with blocks of up to four
7961 // registers. (Smaller blocks will be used if necessary.)
7962 PushPopByFour,
7963
7964 // Use Push<Size>RegList and Pop<Size>RegList to transfer the registers.
7965 PushPopRegList
7966 };
7967
7968
7969 // For the PushPop* tests, use the maximum number of registers that the test
7970 // supports (where a reg_count argument would otherwise be provided).
7971 static int const kPushPopUseMaxRegCount = -1;
7972
7973 // Test a simple push-pop pattern:
7974 // * Claim <claim> bytes to set the stack alignment.
7975 // * Push <reg_count> registers with size <reg_size>.
7976 // * Clobber the register contents.
7977 // * Pop <reg_count> registers to restore the original contents.
7978 // * Drop <claim> bytes to restore the original stack pointer.
7979 //
7980 // Different push and pop methods can be specified independently to test for
7981 // proper word-endian behaviour.
7982 static void PushPopSimpleHelper(int reg_count,
7983 int claim,
7984 int reg_size,
7985 PushPopMethod push_method,
7986 PushPopMethod pop_method) {
7987 SETUP();
7988
7989 START();
7990
7991 // Arbitrarily pick a register to use as a stack pointer.
7992 const Register& stack_pointer = x20;
7993 const RegList allowed = ~stack_pointer.GetBit();
7994 if (reg_count == kPushPopUseMaxRegCount) {
7995 reg_count = CountSetBits(allowed, kNumberOfRegisters);
7996 }
7997 // Work out which registers to use, based on reg_size.
7998 Register r[kNumberOfRegisters];
7999 Register x[kNumberOfRegisters];
8000 RegList list =
8001 PopulateRegisterArray(NULL, x, r, reg_size, reg_count, allowed);
8002
8003 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
8004 UseScratchRegisterScope temps(&masm);
8005 temps.ExcludeAll();
8006
8007 // The literal base is chosen to have two useful properties:
8008 // * When multiplied by small values (such as a register index), this value
8009 // is clearly readable in the result.
8010 // * The value is not formed from repeating fixed-size smaller values, so it
8011 // can be used to detect endianness-related errors.
8012 uint64_t literal_base = 0x0100001000100101;
8013
8014 {
8015 VIXL_ASSERT(__ StackPointer().Is(sp));
8016 __ Mov(stack_pointer, __ StackPointer());
8017 __ SetStackPointer(stack_pointer);
8018
8019 int i;
8020
8021 // Initialize the registers.
8022 for (i = 0; i < reg_count; i++) {
8023 // Always write into the X register, to ensure that the upper word is
8024 // properly ignored by Push when testing W registers.
8025 __ Mov(x[i], literal_base * i);
8026 }
8027
8028 // Claim memory first, as requested.
8029 __ Claim(claim);
8030
8031 switch (push_method) {
8032 case PushPopByFour:
8033 // Push high-numbered registers first (to the highest addresses).
8034 for (i = reg_count; i >= 4; i -= 4) {
8035 __ Push(r[i - 1], r[i - 2], r[i - 3], r[i - 4]);
8036 }
8037 // Finish off the leftovers.
8038 switch (i) {
8039 case 3:
8040 __ Push(r[2], r[1], r[0]);
8041 break;
8042 case 2:
8043 __ Push(r[1], r[0]);
8044 break;
8045 case 1:
8046 __ Push(r[0]);
8047 break;
8048 default:
8049 VIXL_ASSERT(i == 0);
8050 break;
8051 }
8052 break;
8053 case PushPopRegList:
8054 __ PushSizeRegList(list, reg_size);
8055 break;
8056 }
8057
8058 // Clobber all the registers, to ensure that they get repopulated by Pop.
8059 Clobber(&masm, list);
8060
8061 switch (pop_method) {
8062 case PushPopByFour:
8063 // Pop low-numbered registers first (from the lowest addresses).
8064 for (i = 0; i <= (reg_count - 4); i += 4) {
8065 __ Pop(r[i], r[i + 1], r[i + 2], r[i + 3]);
8066 }
8067 // Finish off the leftovers.
8068 switch (reg_count - i) {
8069 case 3:
8070 __ Pop(r[i], r[i + 1], r[i + 2]);
8071 break;
8072 case 2:
8073 __ Pop(r[i], r[i + 1]);
8074 break;
8075 case 1:
8076 __ Pop(r[i]);
8077 break;
8078 default:
8079 VIXL_ASSERT(i == reg_count);
8080 break;
8081 }
8082 break;
8083 case PushPopRegList:
8084 __ PopSizeRegList(list, reg_size);
8085 break;
8086 }
8087
8088 // Drop memory to restore stack_pointer.
8089 __ Drop(claim);
8090
8091 __ Mov(sp, __ StackPointer());
8092 __ SetStackPointer(sp);
8093 }
8094
8095 END();
8096
8097 if (CAN_RUN()) {
8098 RUN();
8099
8100 // Check that the register contents were preserved.
8101 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8102 // that the upper word was properly cleared by Pop.
8103 literal_base &= (0xffffffffffffffff >> (64 - reg_size));
8104 for (int i = 0; i < reg_count; i++) {
8105 if (x[i].Is(xzr)) {
8106 ASSERT_EQUAL_64(0, x[i]);
8107 } else {
8108 ASSERT_EQUAL_64(literal_base * i, x[i]);
8109 }
8110 }
8111 }
8112 }
8113
8114
8115 TEST(push_pop_xreg_simple_32) {
8116 for (int claim = 0; claim <= 8; claim++) {
8117 for (int count = 0; count <= 8; count++) {
8118 PushPopSimpleHelper(count,
8119 claim,
8120 kWRegSize,
8121 PushPopByFour,
8122 PushPopByFour);
8123 PushPopSimpleHelper(count,
8124 claim,
8125 kWRegSize,
8126 PushPopByFour,
8127 PushPopRegList);
8128 PushPopSimpleHelper(count,
8129 claim,
8130 kWRegSize,
8131 PushPopRegList,
8132 PushPopByFour);
8133 PushPopSimpleHelper(count,
8134 claim,
8135 kWRegSize,
8136 PushPopRegList,
8137 PushPopRegList);
8138 }
8139 // Test with the maximum number of registers.
8140 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8141 claim,
8142 kWRegSize,
8143 PushPopByFour,
8144 PushPopByFour);
8145 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8146 claim,
8147 kWRegSize,
8148 PushPopByFour,
8149 PushPopRegList);
8150 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8151 claim,
8152 kWRegSize,
8153 PushPopRegList,
8154 PushPopByFour);
8155 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8156 claim,
8157 kWRegSize,
8158 PushPopRegList,
8159 PushPopRegList);
8160 }
8161 }
8162
8163
8164 TEST(push_pop_xreg_simple_64) {
8165 for (int claim = 0; claim <= 8; claim++) {
8166 for (int count = 0; count <= 8; count++) {
8167 PushPopSimpleHelper(count,
8168 claim,
8169 kXRegSize,
8170 PushPopByFour,
8171 PushPopByFour);
8172 PushPopSimpleHelper(count,
8173 claim,
8174 kXRegSize,
8175 PushPopByFour,
8176 PushPopRegList);
8177 PushPopSimpleHelper(count,
8178 claim,
8179 kXRegSize,
8180 PushPopRegList,
8181 PushPopByFour);
8182 PushPopSimpleHelper(count,
8183 claim,
8184 kXRegSize,
8185 PushPopRegList,
8186 PushPopRegList);
8187 }
8188 // Test with the maximum number of registers.
8189 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8190 claim,
8191 kXRegSize,
8192 PushPopByFour,
8193 PushPopByFour);
8194 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8195 claim,
8196 kXRegSize,
8197 PushPopByFour,
8198 PushPopRegList);
8199 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8200 claim,
8201 kXRegSize,
8202 PushPopRegList,
8203 PushPopByFour);
8204 PushPopSimpleHelper(kPushPopUseMaxRegCount,
8205 claim,
8206 kXRegSize,
8207 PushPopRegList,
8208 PushPopRegList);
8209 }
8210 }
8211
8212 // For the PushPopFP* tests, use the maximum number of registers that the test
8213 // supports (where a reg_count argument would otherwise be provided).
8214 static int const kPushPopFPUseMaxRegCount = -1;
8215
8216 // Test a simple push-pop pattern:
8217 // * Claim <claim> bytes to set the stack alignment.
8218 // * Push <reg_count> FP registers with size <reg_size>.
8219 // * Clobber the register contents.
8220 // * Pop <reg_count> FP registers to restore the original contents.
8221 // * Drop <claim> bytes to restore the original stack pointer.
8222 //
8223 // Different push and pop methods can be specified independently to test for
8224 // proper word-endian behaviour.
8225 static void PushPopFPSimpleHelper(int reg_count,
8226 int claim,
8227 int reg_size,
8228 PushPopMethod push_method,
8229 PushPopMethod pop_method) {
8230 SETUP_WITH_FEATURES((reg_count == 0) ? CPUFeatures::kNone : CPUFeatures::kFP);
8231
8232 START();
8233
8234 // We can use any floating-point register. None of them are reserved for
8235 // debug code, for example.
8236 static RegList const allowed = ~0;
8237 if (reg_count == kPushPopFPUseMaxRegCount) {
8238 reg_count = CountSetBits(allowed, kNumberOfVRegisters);
8239 }
8240 // Work out which registers to use, based on reg_size.
8241 VRegister v[kNumberOfRegisters];
8242 VRegister d[kNumberOfRegisters];
8243 RegList list =
8244 PopulateVRegisterArray(NULL, d, v, reg_size, reg_count, allowed);
8245
8246 // Arbitrarily pick a register to use as a stack pointer.
8247 const Register& stack_pointer = x10;
8248
8249 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
8250 UseScratchRegisterScope temps(&masm);
8251 temps.ExcludeAll();
8252
8253 // The literal base is chosen to have several useful properties:
8254 // * When multiplied (using an integer) by small values (such as a register
8255 // index), this value is clearly readable in the result.
8256 // * The value is not formed from repeating fixed-size smaller values, so it
8257 // can be used to detect endianness-related errors.
8258 // * It is never a floating-point NaN, and will therefore always compare
8259 // equal to itself.
8260 uint64_t literal_base = 0x0100001000100101;
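  // For example, index 3 corresponds to the bit pattern 0x0300003000300303;
  // its exponent field (0x030) is not all ones, so the encoded double is
  // finite rather than a NaN or infinity.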
8261
8262 {
8263 VIXL_ASSERT(__ StackPointer().Is(sp));
8264 __ Mov(stack_pointer, __ StackPointer());
8265 __ SetStackPointer(stack_pointer);
8266
8267 int i;
8268
8269 // Initialize the registers, using X registers to load the literal.
8270 __ Mov(x0, 0);
8271 __ Mov(x1, literal_base);
8272 for (i = 0; i < reg_count; i++) {
8273 // Always write into the D register, to ensure that the upper word is
8274 // properly ignored by Push when testing S registers.
8275 __ Fmov(d[i], x0);
8276 // Calculate the next literal.
8277 __ Add(x0, x0, x1);
8278 }
8279
8280 // Claim memory first, as requested.
8281 __ Claim(claim);
8282
8283 switch (push_method) {
8284 case PushPopByFour:
8285 // Push high-numbered registers first (to the highest addresses).
8286 for (i = reg_count; i >= 4; i -= 4) {
8287 __ Push(v[i - 1], v[i - 2], v[i - 3], v[i - 4]);
8288 }
8289 // Finish off the leftovers.
8290 switch (i) {
8291 case 3:
8292 __ Push(v[2], v[1], v[0]);
8293 break;
8294 case 2:
8295 __ Push(v[1], v[0]);
8296 break;
8297 case 1:
8298 __ Push(v[0]);
8299 break;
8300 default:
8301 VIXL_ASSERT(i == 0);
8302 break;
8303 }
8304 break;
8305 case PushPopRegList:
8306 __ PushSizeRegList(list, reg_size, CPURegister::kVRegister);
8307 break;
8308 }
8309
8310 // Clobber all the registers, to ensure that they get repopulated by Pop.
8311 ClobberFP(&masm, list);
8312
8313 switch (pop_method) {
8314 case PushPopByFour:
8315 // Pop low-numbered registers first (from the lowest addresses).
8316 for (i = 0; i <= (reg_count - 4); i += 4) {
8317 __ Pop(v[i], v[i + 1], v[i + 2], v[i + 3]);
8318 }
8319 // Finish off the leftovers.
8320 switch (reg_count - i) {
8321 case 3:
8322 __ Pop(v[i], v[i + 1], v[i + 2]);
8323 break;
8324 case 2:
8325 __ Pop(v[i], v[i + 1]);
8326 break;
8327 case 1:
8328 __ Pop(v[i]);
8329 break;
8330 default:
8331 VIXL_ASSERT(i == reg_count);
8332 break;
8333 }
8334 break;
8335 case PushPopRegList:
8336 __ PopSizeRegList(list, reg_size, CPURegister::kVRegister);
8337 break;
8338 }
8339
8340 // Drop memory to restore the stack pointer.
8341 __ Drop(claim);
8342
8343 __ Mov(sp, __ StackPointer());
8344 __ SetStackPointer(sp);
8345 }
8346
8347 END();
8348
8349 if (CAN_RUN()) {
8350 RUN();
8351
8352 // Check that the register contents were preserved.
8353 // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
8354 // test that the upper word was properly cleared by Pop.
8355 literal_base &= (0xffffffffffffffff >> (64 - reg_size));
8356 for (int i = 0; i < reg_count; i++) {
8357 uint64_t literal = literal_base * i;
8358 double expected;
8359 memcpy(&expected, &literal, sizeof(expected));
8360 ASSERT_EQUAL_FP64(expected, d[i]);
8361 }
8362 }
8363 }
8364
8365
8366 TEST(push_pop_fp_xreg_simple_32) {
8367 for (int claim = 0; claim <= 8; claim++) {
8368 for (int count = 0; count <= 8; count++) {
8369 PushPopFPSimpleHelper(count,
8370 claim,
8371 kSRegSize,
8372 PushPopByFour,
8373 PushPopByFour);
8374 PushPopFPSimpleHelper(count,
8375 claim,
8376 kSRegSize,
8377 PushPopByFour,
8378 PushPopRegList);
8379 PushPopFPSimpleHelper(count,
8380 claim,
8381 kSRegSize,
8382 PushPopRegList,
8383 PushPopByFour);
8384 PushPopFPSimpleHelper(count,
8385 claim,
8386 kSRegSize,
8387 PushPopRegList,
8388 PushPopRegList);
8389 }
8390 // Test with the maximum number of registers.
8391 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8392 claim,
8393 kSRegSize,
8394 PushPopByFour,
8395 PushPopByFour);
8396 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8397 claim,
8398 kSRegSize,
8399 PushPopByFour,
8400 PushPopRegList);
8401 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8402 claim,
8403 kSRegSize,
8404 PushPopRegList,
8405 PushPopByFour);
8406 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8407 claim,
8408 kSRegSize,
8409 PushPopRegList,
8410 PushPopRegList);
8411 }
8412 }
8413
8414
8415 TEST(push_pop_fp_xreg_simple_64) {
8416 for (int claim = 0; claim <= 8; claim++) {
8417 for (int count = 0; count <= 8; count++) {
8418 PushPopFPSimpleHelper(count,
8419 claim,
8420 kDRegSize,
8421 PushPopByFour,
8422 PushPopByFour);
8423 PushPopFPSimpleHelper(count,
8424 claim,
8425 kDRegSize,
8426 PushPopByFour,
8427 PushPopRegList);
8428 PushPopFPSimpleHelper(count,
8429 claim,
8430 kDRegSize,
8431 PushPopRegList,
8432 PushPopByFour);
8433 PushPopFPSimpleHelper(count,
8434 claim,
8435 kDRegSize,
8436 PushPopRegList,
8437 PushPopRegList);
8438 }
8439 // Test with the maximum number of registers.
8440 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8441 claim,
8442 kDRegSize,
8443 PushPopByFour,
8444 PushPopByFour);
8445 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8446 claim,
8447 kDRegSize,
8448 PushPopByFour,
8449 PushPopRegList);
8450 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8451 claim,
8452 kDRegSize,
8453 PushPopRegList,
8454 PushPopByFour);
8455 PushPopFPSimpleHelper(kPushPopFPUseMaxRegCount,
8456 claim,
8457 kDRegSize,
8458 PushPopRegList,
8459 PushPopRegList);
8460 }
8461 }
8462
8463
8464 // Push and pop data using an overlapping combination of Push/Pop and
8465 // RegList-based methods.
8466 static void PushPopMixedMethodsHelper(int claim, int reg_size) {
8467 SETUP();
8468
8469 // Arbitrarily pick a register to use as a stack pointer.
8470 const Register& stack_pointer = x5;
8471 const RegList allowed = ~stack_pointer.GetBit();
8472 // Work out which registers to use, based on reg_size.
8473 Register r[10];
8474 Register x[10];
8475 PopulateRegisterArray(NULL, x, r, reg_size, 10, allowed);
8476
8477 // Calculate some handy register lists.
8478 RegList r0_to_r3 = 0;
8479 for (int i = 0; i <= 3; i++) {
8480 r0_to_r3 |= x[i].GetBit();
8481 }
8482 RegList r4_to_r5 = 0;
8483 for (int i = 4; i <= 5; i++) {
8484 r4_to_r5 |= x[i].GetBit();
8485 }
8486 RegList r6_to_r9 = 0;
8487 for (int i = 6; i <= 9; i++) {
8488 r6_to_r9 |= x[i].GetBit();
8489 }
8490
8491 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
8492 UseScratchRegisterScope temps(&masm);
8493 temps.ExcludeAll();
8494
8495 // The literal base is chosen to have two useful properties:
8496 // * When multiplied by small values (such as a register index), this value
8497 // is clearly readable in the result.
8498 // * The value is not formed from repeating fixed-size smaller values, so it
8499 // can be used to detect endianness-related errors.
8500 uint64_t literal_base = 0x0100001000100101;
8501
8502 START();
8503 {
8504 VIXL_ASSERT(__ StackPointer().Is(sp));
8505 __ Mov(stack_pointer, __ StackPointer());
8506 __ SetStackPointer(stack_pointer);
8507
8508 // Claim memory first, as requested.
8509 __ Claim(claim);
8510
8511 __ Mov(x[3], literal_base * 3);
8512 __ Mov(x[2], literal_base * 2);
8513 __ Mov(x[1], literal_base * 1);
8514 __ Mov(x[0], literal_base * 0);
8515
8516 __ PushSizeRegList(r0_to_r3, reg_size);
8517 __ Push(r[3], r[2]);
8518
8519 Clobber(&masm, r0_to_r3);
8520 __ PopSizeRegList(r0_to_r3, reg_size);
8521
8522 __ Push(r[2], r[1], r[3], r[0]);
8523
8524 Clobber(&masm, r4_to_r5);
8525 __ Pop(r[4], r[5]);
8526 Clobber(&masm, r6_to_r9);
8527 __ Pop(r[6], r[7], r[8], r[9]);
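    // Tracing the pushes and pops above, in units of literal_base: r[4] and
    // r[5] receive the top two slots of the last Push (2 and 1), and
    // r[6]-r[9] receive the next four slots (3, 0, and then 2 and 3 left
    // over from the first PushSizeRegList).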
8528
8529 // Drop memory to restore stack_pointer.
8530 __ Drop(claim);
8531
8532 __ Mov(sp, __ StackPointer());
8533 __ SetStackPointer(sp);
8534 }
8535
8536 END();
8537
8538 if (CAN_RUN()) {
8539 RUN();
8540
8541 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
8542 // that the upper word was properly cleared by Pop.
8543 literal_base &= (0xffffffffffffffff >> (64 - reg_size));
8544
8545 ASSERT_EQUAL_64(literal_base * 3, x[9]);
8546 ASSERT_EQUAL_64(literal_base * 2, x[8]);
8547 ASSERT_EQUAL_64(literal_base * 0, x[7]);
8548 ASSERT_EQUAL_64(literal_base * 3, x[6]);
8549 ASSERT_EQUAL_64(literal_base * 1, x[5]);
8550 ASSERT_EQUAL_64(literal_base * 2, x[4]);
8551 }
8552 }
8553
8554
8555 TEST(push_pop_xreg_mixed_methods_64) {
8556 for (int claim = 0; claim <= 8; claim++) {
8557 PushPopMixedMethodsHelper(claim, kXRegSize);
8558 }
8559 }
8560
8561
8562 TEST(push_pop_xreg_mixed_methods_32) {
8563 for (int claim = 0; claim <= 8; claim++) {
8564 PushPopMixedMethodsHelper(claim, kWRegSize);
8565 }
8566 }
8567
8568
8569 // Push and pop data using overlapping X- and W-sized quantities.
8570 static void PushPopWXOverlapHelper(int reg_count, int claim) {
8571 SETUP();
8572
8573 // Arbitrarily pick a register to use as a stack pointer.
8574 const Register& stack_pointer = x10;
8575 const RegList allowed = ~stack_pointer.GetBit();
8576 if (reg_count == kPushPopUseMaxRegCount) {
8577 reg_count = CountSetBits(allowed, kNumberOfRegisters);
8578 }
8579 // Work out which registers to use, based on reg_size.
8580 Register w[kNumberOfRegisters];
8581 Register x[kNumberOfRegisters];
8582 RegList list = PopulateRegisterArray(w, x, NULL, 0, reg_count, allowed);
8583
8584 // The number of W-sized slots we expect to pop. When we pop, we alternate
8585 // between W and X registers, so we need reg_count*1.5 W-sized slots.
8586 int const requested_w_slots = reg_count + reg_count / 2;
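  // For example, with reg_count == 5 this requests 5 + 5 / 2 = 7 W-sized
  // slots.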
8587
8588 // Track what _should_ be on the stack, using W-sized slots.
8589 static int const kMaxWSlots = kNumberOfRegisters + kNumberOfRegisters / 2;
8590 uint32_t stack[kMaxWSlots];
8591 for (int i = 0; i < kMaxWSlots; i++) {
8592 stack[i] = 0xdeadbeef;
8593 }
8594
8595 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
8596 UseScratchRegisterScope temps(&masm);
8597 temps.ExcludeAll();
8598
8599 // The literal base is chosen to have two useful properties:
8600 // * When multiplied by small values (such as a register index), this value
8601 // is clearly readable in the result.
8602 // * The value is not formed from repeating fixed-size smaller values, so it
8603 // can be used to detect endianness-related errors.
8604 static uint64_t const literal_base = 0x0100001000100101;
8605 static uint64_t const literal_base_hi = literal_base >> 32;
8606 static uint64_t const literal_base_lo = literal_base & 0xffffffff;
8607 static uint64_t const literal_base_w = literal_base & 0xffffffff;
8608
8609 START();
8610 {
8611 VIXL_ASSERT(__ StackPointer().Is(sp));
8612 __ Mov(stack_pointer, __ StackPointer());
8613 __ SetStackPointer(stack_pointer);
8614
8615 // Initialize the registers.
8616 for (int i = 0; i < reg_count; i++) {
8617 // Always write into the X register, to ensure that the upper word is
8618 // properly ignored by Push when testing W registers.
8619 __ Mov(x[i], literal_base * i);
8620 }
8621
8622 // Claim memory first, as requested.
8623 __ Claim(claim);
8624
8625 // The push-pop pattern is as follows:
8626 // Push: Pop:
8627 // x[0](hi) -> w[0]
8628 // x[0](lo) -> x[1](hi)
8629 // w[1] -> x[1](lo)
8630 // w[1] -> w[2]
8631 // x[2](hi) -> x[2](hi)
8632 // x[2](lo) -> x[2](lo)
8633 // x[2](hi) -> w[3]
8634 // x[2](lo) -> x[4](hi)
8635 // x[2](hi) -> x[4](lo)
8636 // x[2](lo) -> w[5]
8637 // w[3] -> x[5](hi)
8638 // w[3] -> x[6](lo)
8639 // w[3] -> w[7]
8640 // w[3] -> x[8](hi)
8641 // x[4](hi) -> x[8](lo)
8642 // x[4](lo) -> w[9]
8643 // ... pattern continues ...
8644 //
8645 // That is, registers are pushed starting with the lower numbers,
8646 // alternating between x and w registers, and pushing i%4+1 copies of each,
8647 // where i is the register number.
8648 // Registers are popped one at a time, starting with the higher-numbered
8649 // registers and alternating between x and w registers.
8650 //
8651 // This pattern provides a wide variety of alignment effects and overlaps.
8652
8653 // ---- Push ----
8654
8655 int active_w_slots = 0;
8656 for (int i = 0; active_w_slots < requested_w_slots; i++) {
8657 VIXL_ASSERT(i < reg_count);
8658 // In order to test various arguments to PushMultipleTimes, and to try to
8659 // exercise different alignment and overlap effects, we push each
8660 // register a different number of times.
8661 int times = i % 4 + 1;
8662 if (i & 1) {
8663 // Push odd-numbered registers as W registers.
8664 __ PushMultipleTimes(times, w[i]);
8665 // Fill in the expected stack slots.
8666 for (int j = 0; j < times; j++) {
8667 if (w[i].Is(wzr)) {
8668 // The zero register always writes zeroes.
8669 stack[active_w_slots++] = 0;
8670 } else {
8671 stack[active_w_slots++] = literal_base_w * i;
8672 }
8673 }
8674 } else {
8675 // Push even-numbered registers as X registers.
8676 __ PushMultipleTimes(times, x[i]);
8677 // Fill in the expected stack slots.
8678 for (int j = 0; j < times; j++) {
8679 if (x[i].Is(xzr)) {
8680 // The zero register always writes zeroes.
8681 stack[active_w_slots++] = 0;
8682 stack[active_w_slots++] = 0;
8683 } else {
8684 stack[active_w_slots++] = literal_base_hi * i;
8685 stack[active_w_slots++] = literal_base_lo * i;
8686 }
8687 }
8688 }
8689 }
8690 // Because we were pushing several registers at a time, we probably pushed
8691 // more than we needed to.
8692 if (active_w_slots > requested_w_slots) {
8693 __ Drop((active_w_slots - requested_w_slots) * kWRegSizeInBytes);
8694 // Bump the number of active W-sized slots back to where it should be,
8695 // and fill the empty space with a placeholder value.
8696 do {
8697 stack[active_w_slots--] = 0xdeadbeef;
8698 } while (active_w_slots > requested_w_slots);
8699 }
8700
8701 // ---- Pop ----
8702
8703 Clobber(&masm, list);
8704
8705 // If popping an even number of registers, the first one will be X-sized.
8706 // Otherwise, the first one will be W-sized.
8707 bool next_is_64 = !(reg_count & 1);
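    // For example, with reg_count == 5 the pops consume 1 + 2 + 1 + 2 + 1 = 7
    // W-sized slots, which matches requested_w_slots above.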
8708 for (int i = reg_count - 1; i >= 0; i--) {
8709 if (next_is_64) {
8710 __ Pop(x[i]);
8711 active_w_slots -= 2;
8712 } else {
8713 __ Pop(w[i]);
8714 active_w_slots -= 1;
8715 }
8716 next_is_64 = !next_is_64;
8717 }
8718 VIXL_ASSERT(active_w_slots == 0);
8719
8720 // Drop memory to restore stack_pointer.
8721 __ Drop(claim);
8722
8723 __ Mov(sp, __ StackPointer());
8724 __ SetStackPointer(sp);
8725 }
8726
8727 END();
8728
8729 if (CAN_RUN()) {
8730 RUN();
8731
8732 int slot = 0;
8733 for (int i = 0; i < reg_count; i++) {
8734 // Even-numbered registers were written as W registers.
8735 // Odd-numbered registers were written as X registers.
8736 bool expect_64 = (i & 1);
8737 uint64_t expected;
8738
8739 if (expect_64) {
8740 uint64_t hi = stack[slot++];
8741 uint64_t lo = stack[slot++];
8742 expected = (hi << 32) | lo;
8743 } else {
8744 expected = stack[slot++];
8745 }
8746
8747 // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
8748 // test that the upper word was properly cleared by Pop.
8749 if (x[i].Is(xzr)) {
8750 ASSERT_EQUAL_64(0, x[i]);
8751 } else {
8752 ASSERT_EQUAL_64(expected, x[i]);
8753 }
8754 }
8755 VIXL_ASSERT(slot == requested_w_slots);
8756 }
8757 }
8758
8759
8760 TEST(push_pop_xreg_wx_overlap) {
8761 for (int claim = 0; claim <= 8; claim++) {
8762 for (int count = 1; count <= 8; count++) {
8763 PushPopWXOverlapHelper(count, claim);
8764 }
8765 // Test with the maximum number of registers.
8766 PushPopWXOverlapHelper(kPushPopUseMaxRegCount, claim);
8767 }
8768 }
8769
8770
8771 TEST(push_pop_sp) {
8772 SETUP();
8773
8774 START();
8775
8776 VIXL_ASSERT(sp.Is(__ StackPointer()));
8777
8778 // Acquire all temps from the MacroAssembler. They are used arbitrarily below.
8779 UseScratchRegisterScope temps(&masm);
8780 temps.ExcludeAll();
8781
8782 __ Mov(x3, 0x3333333333333333);
8783 __ Mov(x2, 0x2222222222222222);
8784 __ Mov(x1, 0x1111111111111111);
8785 __ Mov(x0, 0x0000000000000000);
8786 __ Claim(2 * kXRegSizeInBytes);
8787 __ PushXRegList(x0.GetBit() | x1.GetBit() | x2.GetBit() | x3.GetBit());
8788 __ Push(x3, x2);
8789 __ PopXRegList(x0.GetBit() | x1.GetBit() | x2.GetBit() | x3.GetBit());
8790 __ Push(x2, x1, x3, x0);
8791 __ Pop(x4, x5);
8792 __ Pop(x6, x7, x8, x9);
8793
8794 __ Claim(2 * kXRegSizeInBytes);
8795 __ PushWRegList(w0.GetBit() | w1.GetBit() | w2.GetBit() | w3.GetBit());
8796 __ Push(w3, w1, w2, w0);
8797 __ PopWRegList(w10.GetBit() | w11.GetBit() | w12.GetBit() | w13.GetBit());
8798 __ Pop(w14, w15, w16, w17);
8799
8800 __ Claim(2 * kXRegSizeInBytes);
8801 __ Push(w2, w2, w1, w1);
8802 __ Push(x3, x3);
8803 __ Pop(w18, w19, w20, w21);
8804 __ Pop(x22, x23);
8805
8806 __ Claim(2 * kXRegSizeInBytes);
8807 __ PushXRegList(x1.GetBit() | x22.GetBit());
8808 __ PopXRegList(x24.GetBit() | x26.GetBit());
8809
8810 __ Claim(2 * kXRegSizeInBytes);
8811 __ PushWRegList(w1.GetBit() | w2.GetBit() | w4.GetBit() | w22.GetBit());
8812 __ PopWRegList(w25.GetBit() | w27.GetBit() | w28.GetBit() | w29.GetBit());
8813
8814 __ Claim(2 * kXRegSizeInBytes);
8815 __ PushXRegList(0);
8816 __ PopXRegList(0);
8817 __ PushXRegList(0xffffffff);
8818 __ PopXRegList(0xffffffff);
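  // Each of the six Claim(2 * kXRegSizeInBytes) calls above is still
  // outstanding (every Push was matched by a Pop of the same size), so the
  // Drop below releases all twelve X-sized slots at once.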
8819 __ Drop(12 * kXRegSizeInBytes);
8820 END();
8821
8822 if (CAN_RUN()) {
8823 RUN();
8824
8825 ASSERT_EQUAL_64(0x1111111111111111, x3);
8826 ASSERT_EQUAL_64(0x0000000000000000, x2);
8827 ASSERT_EQUAL_64(0x3333333333333333, x1);
8828 ASSERT_EQUAL_64(0x2222222222222222, x0);
8829 ASSERT_EQUAL_64(0x3333333333333333, x9);
8830 ASSERT_EQUAL_64(0x2222222222222222, x8);
8831 ASSERT_EQUAL_64(0x0000000000000000, x7);
8832 ASSERT_EQUAL_64(0x3333333333333333, x6);
8833 ASSERT_EQUAL_64(0x1111111111111111, x5);
8834 ASSERT_EQUAL_64(0x2222222222222222, x4);
8835
8836 ASSERT_EQUAL_32(0x11111111U, w13);
8837 ASSERT_EQUAL_32(0x33333333U, w12);
8838 ASSERT_EQUAL_32(0x00000000U, w11);
8839 ASSERT_EQUAL_32(0x22222222U, w10);
8840 ASSERT_EQUAL_32(0x11111111U, w17);
8841 ASSERT_EQUAL_32(0x00000000U, w16);
8842 ASSERT_EQUAL_32(0x33333333U, w15);
8843 ASSERT_EQUAL_32(0x22222222U, w14);
8844
8845 ASSERT_EQUAL_32(0x11111111U, w18);
8846 ASSERT_EQUAL_32(0x11111111U, w19);
8847 ASSERT_EQUAL_32(0x11111111U, w20);
8848 ASSERT_EQUAL_32(0x11111111U, w21);
8849 ASSERT_EQUAL_64(0x3333333333333333, x22);
8850 ASSERT_EQUAL_64(0x0000000000000000, x23);
8851
8852 ASSERT_EQUAL_64(0x3333333333333333, x24);
8853 ASSERT_EQUAL_64(0x3333333333333333, x26);
8854
8855 ASSERT_EQUAL_32(0x33333333U, w25);
8856 ASSERT_EQUAL_32(0x00000000U, w27);
8857 ASSERT_EQUAL_32(0x22222222U, w28);
8858 ASSERT_EQUAL_32(0x33333333U, w29);
8859 }
8860 }
8861
8862
8863 TEST(printf) {
8864 // RegisterDump::Dump uses NEON.
8865 // Printf uses FP to cast FP arguments to doubles.
8866 SETUP_WITH_FEATURES(CPUFeatures::kNEON, CPUFeatures::kFP);
8867
8868 START();
8869
8870 char const* test_plain_string = "Printf with no arguments.\n";
8871 char const* test_substring = "'This is a substring.'";
8872 RegisterDump before;
8873
8874 // Initialize x29 to the value of the stack pointer. We will use x29 as a
8875 // temporary stack pointer later, and initializing it in this way allows the
8876 // RegisterDump check to pass.
8877 __ Mov(x29, __ StackPointer());
8878
8879 // Test simple integer arguments.
8880 __ Mov(x0, 1234);
8881 __ Mov(x1, 0x1234);
8882
8883 // Test simple floating-point arguments.
8884 __ Fmov(d0, 1.234);
8885
8886 // Test pointer (string) arguments.
8887 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
8888
8889 // Test the maximum number of arguments, and sign extension.
8890 __ Mov(w3, 0xffffffff);
8891 __ Mov(w4, 0xffffffff);
8892 __ Mov(x5, 0xffffffffffffffff);
8893 __ Mov(x6, 0xffffffffffffffff);
8894 __ Fmov(s1, 1.234);
8895 __ Fmov(s2, 2.345);
8896 __ Fmov(d3, 3.456);
8897 __ Fmov(d4, 4.567);
8898
8899 // Test printing callee-saved registers.
8900 __ Mov(x28, 0x123456789abcdef);
8901 __ Fmov(d10, 42.0);
8902
8903 // Test with three arguments.
8904 __ Mov(x10, 3);
8905 __ Mov(x11, 40);
8906 __ Mov(x12, 500);
8907
8908 // A single character.
8909 __ Mov(w13, 'x');
8910
8911 // Check that we don't clobber any registers.
8912 before.Dump(&masm);
8913
8914 __ Printf(test_plain_string); // NOLINT(runtime/printf)
8915 __ Printf("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
8916 __ Printf("w5: %" PRId32 ", x5: %" PRId64 "\n", w5, x5);
8917 __ Printf("d0: %f\n", d0);
8918 __ Printf("Test %%s: %s\n", x2);
8919 __ Printf("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32
8920 "\n"
8921 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
8922 w3,
8923 w4,
8924 x5,
8925 x6);
8926 __ Printf("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
8927 __ Printf("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
8928 __ Printf("%g\n", d10);
8929 __ Printf("%%%%%s%%%c%%\n", x2, w13);
8930
8931 // Print the stack pointer (sp).
8932 __ Printf("StackPointer(sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
8933 __ StackPointer(),
8934 __ StackPointer().W());
8935
8936 // Test with a different stack pointer.
8937 const Register old_stack_pointer = __ StackPointer();
8938 __ Mov(x29, old_stack_pointer);
8939 __ SetStackPointer(x29);
8940 // Print the stack pointer (not sp).
8941 __ Printf("StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
8942 __ StackPointer(),
8943 __ StackPointer().W());
8944 __ Mov(old_stack_pointer, __ StackPointer());
8945 __ SetStackPointer(old_stack_pointer);
8946
8947 // Test with three arguments.
8948 __ Printf("3=%u, 4=%u, 5=%u\n", x10, x11, x12);
8949
8950 // Mixed argument types.
8951 __ Printf("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
8952 w3,
8953 s1,
8954 x5,
8955 d3);
8956 __ Printf("s1: %f, d3: %f, w3: %" PRId32 ", x5: %" PRId64 "\n",
8957 s1,
8958 d3,
8959 w3,
8960 x5);
8961
8962 END();
8963 if (CAN_RUN()) {
8964 RUN();
8965
8966 // We cannot easily test the output of the Printf sequences, and because
8967 // Printf preserves all registers by default, we can't look at the number of
8968 // bytes that were printed. However, the printf_no_preserve test should
8969 // check that, and here we just test that we didn't clobber any
8970 // registers.
8971 ASSERT_EQUAL_REGISTERS(before);
8972 }
8973 }
8974
8975
8976 TEST(printf_no_preserve) {
8977 // PrintfNoPreserve uses FP to cast FP arguments to doubles.
8978 SETUP_WITH_FEATURES(CPUFeatures::kFP);
8979
8980 START();
8981
8982 char const* test_plain_string = "Printf with no arguments.\n";
8983 char const* test_substring = "'This is a substring.'";
8984
8985 __ PrintfNoPreserve(test_plain_string);
8986 __ Mov(x19, x0);
8987
8988 // Test simple integer arguments.
8989 __ Mov(x0, 1234);
8990 __ Mov(x1, 0x1234);
8991 __ PrintfNoPreserve("x0: %" PRId64 ", x1: 0x%08" PRIx64 "\n", x0, x1);
8992 __ Mov(x20, x0);
8993
8994 // Test simple floating-point arguments.
8995 __ Fmov(d0, 1.234);
8996 __ PrintfNoPreserve("d0: %f\n", d0);
8997 __ Mov(x21, x0);
8998
8999 // Test pointer (string) arguments.
9000 __ Mov(x2, reinterpret_cast<uintptr_t>(test_substring));
9001 __ PrintfNoPreserve("Test %%s: %s\n", x2);
9002 __ Mov(x22, x0);
9003
9004 // Test the maximum number of arguments, and sign extension.
9005 __ Mov(w3, 0xffffffff);
9006 __ Mov(w4, 0xffffffff);
9007 __ Mov(x5, 0xffffffffffffffff);
9008 __ Mov(x6, 0xffffffffffffffff);
9009 __ PrintfNoPreserve("w3(uint32): %" PRIu32 "\nw4(int32): %" PRId32
9010 "\n"
9011 "x5(uint64): %" PRIu64 "\nx6(int64): %" PRId64 "\n",
9012 w3,
9013 w4,
9014 x5,
9015 x6);
9016 __ Mov(x23, x0);
9017
9018 __ Fmov(s1, 1.234);
9019 __ Fmov(s2, 2.345);
9020 __ Fmov(d3, 3.456);
9021 __ Fmov(d4, 4.567);
9022 __ PrintfNoPreserve("%%f: %f\n%%g: %g\n%%e: %e\n%%E: %E\n", s1, s2, d3, d4);
9023 __ Mov(x24, x0);
9024
9025 // Test printing callee-saved registers.
9026 __ Mov(x28, 0x123456789abcdef);
9027 __ PrintfNoPreserve("0x%" PRIx32 ", 0x%" PRIx64 "\n", w28, x28);
9028 __ Mov(x25, x0);
9029
9030 __ Fmov(d10, 42.0);
9031 __ PrintfNoPreserve("%g\n", d10);
9032 __ Mov(x26, x0);
9033
9034 // Test with a different stack pointer.
9035 const Register old_stack_pointer = __ StackPointer();
9036 __ Mov(x29, old_stack_pointer);
9037 __ SetStackPointer(x29);
9038 // Print the stack pointer (not sp).
9039 __ PrintfNoPreserve("StackPointer(not sp): 0x%016" PRIx64 ", 0x%08" PRIx32
9040 "\n",
9041 __ StackPointer(),
9042 __ StackPointer().W());
9043 __ Mov(x27, x0);
9044 __ Mov(old_stack_pointer, __ StackPointer());
9045 __ SetStackPointer(old_stack_pointer);
9046
9047 // Test with three arguments.
9048 __ Mov(x3, 3);
9049 __ Mov(x4, 40);
9050 __ Mov(x5, 500);
9051 __ PrintfNoPreserve("3=%u, 4=%u, 5=%u\n", x3, x4, x5);
9052 __ Mov(x28, x0);
9053
9054 // Mixed argument types.
9055 __ Mov(w3, 0xffffffff);
9056 __ Fmov(s1, 1.234);
9057 __ Mov(x5, 0xffffffffffffffff);
9058 __ Fmov(d3, 3.456);
9059 __ PrintfNoPreserve("w3: %" PRIu32 ", s1: %f, x5: %" PRIu64 ", d3: %f\n",
9060 w3,
9061 s1,
9062 x5,
9063 d3);
9064 __ Mov(x29, x0);
9065
9066 END();
9067 if (CAN_RUN()) {
9068 RUN();
9069
9070 // We cannot easily test the exact output of the Printf sequences, but we
9071 // can use the return code to check that the string length was
9072 // correct.
9073
9074 // Printf with no arguments.
9075 ASSERT_EQUAL_64(strlen(test_plain_string), x19);
9076 // x0: 1234, x1: 0x00001234
9077 ASSERT_EQUAL_64(25, x20);
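    // (That string is 4 + 4 + 6 + 10 + 1 = 25 characters, including the
    // newline.)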
9078 // d0: 1.234000
9079 ASSERT_EQUAL_64(13, x21);
9080 // Test %s: 'This is a substring.'
9081 ASSERT_EQUAL_64(32, x22);
9082 // w3(uint32): 4294967295
9083 // w4(int32): -1
9084 // x5(uint64): 18446744073709551615
9085 // x6(int64): -1
9086 ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
9087 // %f: 1.234000
9088 // %g: 2.345
9089 // %e: 3.456000e+00
9090 // %E: 4.567000E+00
9091 ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
9092 // 0x89abcdef, 0x123456789abcdef
9093 ASSERT_EQUAL_64(30, x25);
9094 // 42
9095 ASSERT_EQUAL_64(3, x26);
9096 // StackPointer(not sp): 0x00007fb037ae2370, 0x37ae2370
9097 // Note: This is an example value, but the field width is fixed here so the
9098 // string length is still predictable.
9099 ASSERT_EQUAL_64(53, x27);
9100 // 3=3, 4=40, 5=500
9101 ASSERT_EQUAL_64(17, x28);
9102 // w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
9103 ASSERT_EQUAL_64(69, x29);
9104 }
9105 }
9106
9107
9108 TEST(trace) {
9109 // The Trace helper should not generate any code unless the simulator is being
9110 // used.
9111 SETUP();
9112 START();
9113
9114 Label start;
9115 __ Bind(&start);
9116 __ Trace(LOG_ALL, TRACE_ENABLE);
9117 __ Trace(LOG_ALL, TRACE_DISABLE);
9118 if (masm.GenerateSimulatorCode()) {
9119 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&start) > 0);
9120 } else {
9121 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&start) == 0);
9122 }
9123
9124 END();
9125 }
9126
9127
9128 TEST(log) {
9129 // The Log helper should not generate any code unless the simulator is being
9130 // used.
9131 SETUP();
9132 START();
9133
9134 Label start;
9135 __ Bind(&start);
9136 __ Log(LOG_ALL);
9137 if (masm.GenerateSimulatorCode()) {
9138 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&start) > 0);
9139 } else {
9140 VIXL_CHECK(__ GetSizeOfCodeGeneratedSince(&start) == 0);
9141 }
9142
9143 END();
9144 }
9145
9146
9147 TEST(blr_lr) {
9148 // A simple test to check that the simulator correctly handles "blr lr".
9149 SETUP();
9150
9151 START();
9152 Label target;
9153 Label end;
9154
9155 __ Mov(x0, 0x0);
9156 __ Adr(lr, &target);
9157
9158 __ Blr(lr);
9159 __ Mov(x0, 0xdeadbeef);
9160 __ B(&end);
9161
9162 __ Bind(&target);
9163 __ Mov(x0, 0xc001c0de);
9164
9165 __ Bind(&end);
9166 END();
9167
9168 if (CAN_RUN()) {
9169 RUN();
9170
9171 ASSERT_EQUAL_64(0xc001c0de, x0);
9172 }
9173 }
9174
9175
9176 TEST(barriers) {
9177 // Generate all supported barriers. This is just a smoke test.
9178 SETUP();
9179
9180 START();
9181
9182 // DMB
9183 __ Dmb(FullSystem, BarrierAll);
9184 __ Dmb(FullSystem, BarrierReads);
9185 __ Dmb(FullSystem, BarrierWrites);
9186 __ Dmb(FullSystem, BarrierOther);
9187
9188 __ Dmb(InnerShareable, BarrierAll);
9189 __ Dmb(InnerShareable, BarrierReads);
9190 __ Dmb(InnerShareable, BarrierWrites);
9191 __ Dmb(InnerShareable, BarrierOther);
9192
9193 __ Dmb(NonShareable, BarrierAll);
9194 __ Dmb(NonShareable, BarrierReads);
9195 __ Dmb(NonShareable, BarrierWrites);
9196 __ Dmb(NonShareable, BarrierOther);
9197
9198 __ Dmb(OuterShareable, BarrierAll);
9199 __ Dmb(OuterShareable, BarrierReads);
9200 __ Dmb(OuterShareable, BarrierWrites);
9201 __ Dmb(OuterShareable, BarrierOther);
9202
9203 // DSB
9204 __ Dsb(FullSystem, BarrierAll);
9205 __ Dsb(FullSystem, BarrierReads);
9206 __ Dsb(FullSystem, BarrierWrites);
9207 __ Dsb(FullSystem, BarrierOther);
9208
9209 __ Dsb(InnerShareable, BarrierAll);
9210 __ Dsb(InnerShareable, BarrierReads);
9211 __ Dsb(InnerShareable, BarrierWrites);
9212 __ Dsb(InnerShareable, BarrierOther);
9213
9214 __ Dsb(NonShareable, BarrierAll);
9215 __ Dsb(NonShareable, BarrierReads);
9216 __ Dsb(NonShareable, BarrierWrites);
9217 __ Dsb(NonShareable, BarrierOther);
9218
9219 __ Dsb(OuterShareable, BarrierAll);
9220 __ Dsb(OuterShareable, BarrierReads);
9221 __ Dsb(OuterShareable, BarrierWrites);
9222 __ Dsb(OuterShareable, BarrierOther);
9223
9224 // ISB
9225 __ Isb();
9226
9227 END();
9228
9229 if (CAN_RUN()) {
9230 RUN();
9231 }
9232 }
9233
9234
9235 TEST(ldar_stlr) {
9236 // The middle value is read, modified, and written. The padding exists only to
9237 // check for over-write.
9238 uint8_t b[] = {0, 0x12, 0};
9239 uint16_t h[] = {0, 0x1234, 0};
9240 uint32_t w[] = {0, 0x12345678, 0};
9241 uint64_t x[] = {0, 0x123456789abcdef0, 0};
9242
9243 SETUP();
9244 START();
9245
9246 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
9247 __ Ldarb(w0, MemOperand(x10));
9248 __ Add(w0, w0, 1);
9249 __ Stlrb(w0, MemOperand(x10));
9250
9251 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
9252 __ Ldarh(w0, MemOperand(x10));
9253 __ Add(w0, w0, 1);
9254 __ Stlrh(w0, MemOperand(x10));
9255
9256 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
9257 __ Ldar(w0, MemOperand(x10));
9258 __ Add(w0, w0, 1);
9259 __ Stlr(w0, MemOperand(x10));
9260
9261 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
9262 __ Ldar(x0, MemOperand(x10));
9263 __ Add(x0, x0, 1);
9264 __ Stlr(x0, MemOperand(x10));
9265
9266 END();
9267 if (CAN_RUN()) {
9268 RUN();
9269
9270 ASSERT_EQUAL_32(0x13, b[1]);
9271 ASSERT_EQUAL_32(0x1235, h[1]);
9272 ASSERT_EQUAL_32(0x12345679, w[1]);
9273 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
9274
9275 // Check for over-write.
9276 ASSERT_EQUAL_32(0, b[0]);
9277 ASSERT_EQUAL_32(0, b[2]);
9278 ASSERT_EQUAL_32(0, h[0]);
9279 ASSERT_EQUAL_32(0, h[2]);
9280 ASSERT_EQUAL_32(0, w[0]);
9281 ASSERT_EQUAL_32(0, w[2]);
9282 ASSERT_EQUAL_64(0, x[0]);
9283 ASSERT_EQUAL_64(0, x[2]);
9284 }
9285 }
9286
9287
9288 TEST(ldlar_stllr) {
9289 // The middle value is read, modified, and written. The padding exists only to
9290 // check for over-write.
9291 uint8_t b[] = {0, 0x12, 0};
9292 uint16_t h[] = {0, 0x1234, 0};
9293 uint32_t w[] = {0, 0x12345678, 0};
9294 uint64_t x[] = {0, 0x123456789abcdef0, 0};
9295
9296 SETUP_WITH_FEATURES(CPUFeatures::kLORegions);
9297
9298 START();
9299
9300 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
9301 __ Ldlarb(w0, MemOperand(x10));
9302 __ Add(w0, w0, 1);
9303 __ Stllrb(w0, MemOperand(x10));
9304
9305 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
9306 __ Ldlarh(w0, MemOperand(x10));
9307 __ Add(w0, w0, 1);
9308 __ Stllrh(w0, MemOperand(x10));
9309
9310 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
9311 __ Ldlar(w0, MemOperand(x10));
9312 __ Add(w0, w0, 1);
9313 __ Stllr(w0, MemOperand(x10));
9314
9315 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
9316 __ Ldlar(x0, MemOperand(x10));
9317 __ Add(x0, x0, 1);
9318 __ Stllr(x0, MemOperand(x10));
9319
9320 END();
9321
9322 if (CAN_RUN()) {
9323 RUN();
9324
9325 ASSERT_EQUAL_32(0x13, b[1]);
9326 ASSERT_EQUAL_32(0x1235, h[1]);
9327 ASSERT_EQUAL_32(0x12345679, w[1]);
9328 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
9329
9330 // Check for over-write.
9331 ASSERT_EQUAL_32(0, b[0]);
9332 ASSERT_EQUAL_32(0, b[2]);
9333 ASSERT_EQUAL_32(0, h[0]);
9334 ASSERT_EQUAL_32(0, h[2]);
9335 ASSERT_EQUAL_32(0, w[0]);
9336 ASSERT_EQUAL_32(0, w[2]);
9337 ASSERT_EQUAL_64(0, x[0]);
9338 ASSERT_EQUAL_64(0, x[2]);
9339 }
9340 }
9341
9342
9343 TEST(ldxr_stxr) {
9344 // The middle value is read, modified, and written. The padding exists only to
9345 // check for over-write.
9346 uint8_t b[] = {0, 0x12, 0};
9347 uint16_t h[] = {0, 0x1234, 0};
9348 uint32_t w[] = {0, 0x12345678, 0};
9349 uint64_t x[] = {0, 0x123456789abcdef0, 0};
9350
9351 // As above, but get suitably-aligned values for ldxp and stxp.
9352 uint32_t wp_data[] = {0, 0, 0, 0, 0};
9353 uint32_t* wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
9354 wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
9355 wp[2] = 0x87654321;
9356 uint64_t xp_data[] = {0, 0, 0, 0, 0};
9357 uint64_t* xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
9358 xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
9359 xp[2] = 0x0fedcba987654321;
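  // The "AlignUp(... + 1, 2 * size) - 1" pattern makes element [1] of each
  // array aligned to twice the register size, so the exclusive-pair accesses
  // below are naturally aligned; elements [0] and [3] stay as padding for the
  // over-write checks.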
9360
9361 SETUP();
9362 START();
9363
9364 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
9365 Label try_b;
9366 __ Bind(&try_b);
9367 __ Ldxrb(w0, MemOperand(x10));
9368 __ Add(w0, w0, 1);
9369 __ Stxrb(w5, w0, MemOperand(x10));
9370 __ Cbnz(w5, &try_b);
9371
9372 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
9373 Label try_h;
9374 __ Bind(&try_h);
9375 __ Ldxrh(w0, MemOperand(x10));
9376 __ Add(w0, w0, 1);
9377 __ Stxrh(w5, w0, MemOperand(x10));
9378 __ Cbnz(w5, &try_h);
9379
9380 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
9381 Label try_w;
9382 __ Bind(&try_w);
9383 __ Ldxr(w0, MemOperand(x10));
9384 __ Add(w0, w0, 1);
9385 __ Stxr(w5, w0, MemOperand(x10));
9386 __ Cbnz(w5, &try_w);
9387
9388 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
9389 Label try_x;
9390 __ Bind(&try_x);
9391 __ Ldxr(x0, MemOperand(x10));
9392 __ Add(x0, x0, 1);
9393 __ Stxr(w5, x0, MemOperand(x10));
9394 __ Cbnz(w5, &try_x);
9395
9396 __ Mov(x10, reinterpret_cast<uintptr_t>(&wp[1]));
9397 Label try_wp;
9398 __ Bind(&try_wp);
9399 __ Ldxp(w0, w1, MemOperand(x10));
9400 __ Add(w0, w0, 1);
9401 __ Add(w1, w1, 1);
9402 __ Stxp(w5, w0, w1, MemOperand(x10));
9403 __ Cbnz(w5, &try_wp);
9404
9405 __ Mov(x10, reinterpret_cast<uintptr_t>(&xp[1]));
9406 Label try_xp;
9407 __ Bind(&try_xp);
9408 __ Ldxp(x0, x1, MemOperand(x10));
9409 __ Add(x0, x0, 1);
9410 __ Add(x1, x1, 1);
9411 __ Stxp(w5, x0, x1, MemOperand(x10));
9412 __ Cbnz(w5, &try_xp);
9413
9414 END();
9415 if (CAN_RUN()) {
9416 RUN();
9417
9418 ASSERT_EQUAL_32(0x13, b[1]);
9419 ASSERT_EQUAL_32(0x1235, h[1]);
9420 ASSERT_EQUAL_32(0x12345679, w[1]);
9421 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
9422 ASSERT_EQUAL_32(0x12345679, wp[1]);
9423 ASSERT_EQUAL_32(0x87654322, wp[2]);
9424 ASSERT_EQUAL_64(0x123456789abcdef1, xp[1]);
9425 ASSERT_EQUAL_64(0x0fedcba987654322, xp[2]);
9426
9427 // Check for over-write.
9428 ASSERT_EQUAL_32(0, b[0]);
9429 ASSERT_EQUAL_32(0, b[2]);
9430 ASSERT_EQUAL_32(0, h[0]);
9431 ASSERT_EQUAL_32(0, h[2]);
9432 ASSERT_EQUAL_32(0, w[0]);
9433 ASSERT_EQUAL_32(0, w[2]);
9434 ASSERT_EQUAL_64(0, x[0]);
9435 ASSERT_EQUAL_64(0, x[2]);
9436 ASSERT_EQUAL_32(0, wp[0]);
9437 ASSERT_EQUAL_32(0, wp[3]);
9438 ASSERT_EQUAL_64(0, xp[0]);
9439 ASSERT_EQUAL_64(0, xp[3]);
9440 }
9441 }
9442
9443
9444 TEST(ldaxr_stlxr) {
9445 // The middle value is read, modified, and written. The padding exists only to
9446 // check for over-write.
9447 uint8_t b[] = {0, 0x12, 0};
9448 uint16_t h[] = {0, 0x1234, 0};
9449 uint32_t w[] = {0, 0x12345678, 0};
9450 uint64_t x[] = {0, 0x123456789abcdef0, 0};
9451
9452 // As above, but get suitably-aligned values for ldaxp and stlxp.
9453 uint32_t wp_data[] = {0, 0, 0, 0, 0};
9454 uint32_t* wp = AlignUp(wp_data + 1, kWRegSizeInBytes * 2) - 1;
9455 wp[1] = 0x12345678; // wp[1] is 64-bit-aligned.
9456 wp[2] = 0x87654321;
9457 uint64_t xp_data[] = {0, 0, 0, 0, 0};
9458 uint64_t* xp = AlignUp(xp_data + 1, kXRegSizeInBytes * 2) - 1;
9459 xp[1] = 0x123456789abcdef0; // xp[1] is 128-bit-aligned.
9460 xp[2] = 0x0fedcba987654321;
9461
9462 SETUP();
9463 START();
9464
9465 __ Mov(x10, reinterpret_cast<uintptr_t>(&b[1]));
9466 Label try_b;
9467 __ Bind(&try_b);
9468 __ Ldaxrb(w0, MemOperand(x10));
9469 __ Add(w0, w0, 1);
9470 __ Stlxrb(w5, w0, MemOperand(x10));
9471 __ Cbnz(w5, &try_b);
9472
9473 __ Mov(x10, reinterpret_cast<uintptr_t>(&h[1]));
9474 Label try_h;
9475 __ Bind(&try_h);
9476 __ Ldaxrh(w0, MemOperand(x10));
9477 __ Add(w0, w0, 1);
9478 __ Stlxrh(w5, w0, MemOperand(x10));
9479 __ Cbnz(w5, &try_h);
9480
9481 __ Mov(x10, reinterpret_cast<uintptr_t>(&w[1]));
9482 Label try_w;
9483 __ Bind(&try_w);
9484 __ Ldaxr(w0, MemOperand(x10));
9485 __ Add(w0, w0, 1);
9486 __ Stlxr(w5, w0, MemOperand(x10));
9487 __ Cbnz(w5, &try_w);
9488
9489 __ Mov(x10, reinterpret_cast<uintptr_t>(&x[1]));
9490 Label try_x;
9491 __ Bind(&try_x);
9492 __ Ldaxr(x0, MemOperand(x10));
9493 __ Add(x0, x0, 1);
9494 __ Stlxr(w5, x0, MemOperand(x10));
9495 __ Cbnz(w5, &try_x);
9496
9497 __ Mov(x10, reinterpret_cast<uintptr_t>(&wp[1]));
9498 Label try_wp;
9499 __ Bind(&try_wp);
9500 __ Ldaxp(w0, w1, MemOperand(x10));
9501 __ Add(w0, w0, 1);
9502 __ Add(w1, w1, 1);
9503 __ Stlxp(w5, w0, w1, MemOperand(x10));
9504 __ Cbnz(w5, &try_wp);
9505
9506 __ Mov(x10, reinterpret_cast<uintptr_t>(&xp[1]));
9507 Label try_xp;
9508 __ Bind(&try_xp);
9509 __ Ldaxp(x0, x1, MemOperand(x10));
9510 __ Add(x0, x0, 1);
9511 __ Add(x1, x1, 1);
9512 __ Stlxp(w5, x0, x1, MemOperand(x10));
9513 __ Cbnz(w5, &try_xp);
9514
9515 END();
9516 if (CAN_RUN()) {
9517 RUN();
9518
9519 ASSERT_EQUAL_32(0x13, b[1]);
9520 ASSERT_EQUAL_32(0x1235, h[1]);
9521 ASSERT_EQUAL_32(0x12345679, w[1]);
9522 ASSERT_EQUAL_64(0x123456789abcdef1, x[1]);
9523 ASSERT_EQUAL_32(0x12345679, wp[1]);
9524 ASSERT_EQUAL_32(0x87654322, wp[2]);
9525 ASSERT_EQUAL_64(0x123456789abcdef1, xp[1]);
9526 ASSERT_EQUAL_64(0x0fedcba987654322, xp[2]);
9527
9528 // Check for over-write.
9529 ASSERT_EQUAL_32(0, b[0]);
9530 ASSERT_EQUAL_32(0, b[2]);
9531 ASSERT_EQUAL_32(0, h[0]);
9532 ASSERT_EQUAL_32(0, h[2]);
9533 ASSERT_EQUAL_32(0, w[0]);
9534 ASSERT_EQUAL_32(0, w[2]);
9535 ASSERT_EQUAL_64(0, x[0]);
9536 ASSERT_EQUAL_64(0, x[2]);
9537 ASSERT_EQUAL_32(0, wp[0]);
9538 ASSERT_EQUAL_32(0, wp[3]);
9539 ASSERT_EQUAL_64(0, xp[0]);
9540 ASSERT_EQUAL_64(0, xp[3]);
9541 }
9542 }
9543
9544
9545 TEST(clrex) {
9546 // This data should never be written.
9547 uint64_t data[] = {0, 0, 0};
9548 uint64_t* data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
9549
9550 SETUP();
9551 START();
9552
9553 __ Mov(x10, reinterpret_cast<uintptr_t>(data_aligned));
9554 __ Mov(w6, 0);
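  // w6 accumulates the status results: each store-exclusive below writes 1 to
  // w5 when it fails, and the Clrex issued after each load-exclusive clears
  // the monitor so that every store-exclusive should fail.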
9555
9556 __ Ldxrb(w0, MemOperand(x10));
9557 __ Clrex();
9558 __ Add(w0, w0, 1);
9559 __ Stxrb(w5, w0, MemOperand(x10));
9560 __ Add(w6, w6, w5);
9561
9562 __ Ldxrh(w0, MemOperand(x10));
9563 __ Clrex();
9564 __ Add(w0, w0, 1);
9565 __ Stxrh(w5, w0, MemOperand(x10));
9566 __ Add(w6, w6, w5);
9567
9568 __ Ldxr(w0, MemOperand(x10));
9569 __ Clrex();
9570 __ Add(w0, w0, 1);
9571 __ Stxr(w5, w0, MemOperand(x10));
9572 __ Add(w6, w6, w5);
9573
9574 __ Ldxr(x0, MemOperand(x10));
9575 __ Clrex();
9576 __ Add(x0, x0, 1);
9577 __ Stxr(w5, x0, MemOperand(x10));
9578 __ Add(w6, w6, w5);
9579
9580 __ Ldxp(w0, w1, MemOperand(x10));
9581 __ Clrex();
9582 __ Add(w0, w0, 1);
9583 __ Add(w1, w1, 1);
9584 __ Stxp(w5, w0, w1, MemOperand(x10));
9585 __ Add(w6, w6, w5);
9586
9587 __ Ldxp(x0, x1, MemOperand(x10));
9588 __ Clrex();
9589 __ Add(x0, x0, 1);
9590 __ Add(x1, x1, 1);
9591 __ Stxp(w5, x0, x1, MemOperand(x10));
9592 __ Add(w6, w6, w5);
9593
9594 // Acquire-release variants.
9595
9596 __ Ldaxrb(w0, MemOperand(x10));
9597 __ Clrex();
9598 __ Add(w0, w0, 1);
9599 __ Stlxrb(w5, w0, MemOperand(x10));
9600 __ Add(w6, w6, w5);
9601
9602 __ Ldaxrh(w0, MemOperand(x10));
9603 __ Clrex();
9604 __ Add(w0, w0, 1);
9605 __ Stlxrh(w5, w0, MemOperand(x10));
9606 __ Add(w6, w6, w5);
9607
9608 __ Ldaxr(w0, MemOperand(x10));
9609 __ Clrex();
9610 __ Add(w0, w0, 1);
9611 __ Stlxr(w5, w0, MemOperand(x10));
9612 __ Add(w6, w6, w5);
9613
9614 __ Ldaxr(x0, MemOperand(x10));
9615 __ Clrex();
9616 __ Add(x0, x0, 1);
9617 __ Stlxr(w5, x0, MemOperand(x10));
9618 __ Add(w6, w6, w5);
9619
9620 __ Ldaxp(w0, w1, MemOperand(x10));
9621 __ Clrex();
9622 __ Add(w0, w0, 1);
9623 __ Add(w1, w1, 1);
9624 __ Stlxp(w5, w0, w1, MemOperand(x10));
9625 __ Add(w6, w6, w5);
9626
9627 __ Ldaxp(x0, x1, MemOperand(x10));
9628 __ Clrex();
9629 __ Add(x0, x0, 1);
9630 __ Add(x1, x1, 1);
9631 __ Stlxp(w5, x0, x1, MemOperand(x10));
9632 __ Add(w6, w6, w5);
9633
9634 END();
9635 if (CAN_RUN()) {
9636 RUN();
9637
9638 // None of the 12 store-exclusives should have succeeded.
9639 ASSERT_EQUAL_32(12, w6);
9640
9641 ASSERT_EQUAL_64(0, data[0]);
9642 ASSERT_EQUAL_64(0, data[1]);
9643 ASSERT_EQUAL_64(0, data[2]);
9644 }
9645 }
9646
9647
9648 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
9649 // Check that the simulator occasionally makes store-exclusive operations fail.
9650 TEST(ldxr_stxr_fail) {
9651 uint64_t data[] = {0, 0, 0};
9652 uint64_t* data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
9653
9654 // Impose a hard limit on the number of attempts, so the test cannot hang.
9655 static const uint64_t kWatchdog = 10000;
9656 Label done;
9657
9658 SETUP();
9659 START();
9660
9661 __ Mov(x10, reinterpret_cast<uintptr_t>(data_aligned));
9662 __ Mov(x11, kWatchdog);
9663
9664 // This loop is the opposite of what we normally do with ldxr and stxr; we
9665 // keep trying until we fail (or the watchdog counter runs out).
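  // A successful store-exclusive writes 0 to w5, so each Cbz(w5, ...) below
  // loops back; a loop is only left once the simulator reports a failure
  // (w5 == 1) or the watchdog expires.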
9666 Label try_b;
9667 __ Bind(&try_b);
9668 __ Ldxrb(w0, MemOperand(x10));
9669 __ Stxrb(w5, w0, MemOperand(x10));
9670 // Check the watchdog counter.
9671 __ Sub(x11, x11, 1);
9672 __ Cbz(x11, &done);
9673 // Check the exclusive-store result.
9674 __ Cbz(w5, &try_b);
9675
9676 Label try_h;
9677 __ Bind(&try_h);
9678 __ Ldxrh(w0, MemOperand(x10));
9679 __ Stxrh(w5, w0, MemOperand(x10));
9680 __ Sub(x11, x11, 1);
9681 __ Cbz(x11, &done);
9682 __ Cbz(w5, &try_h);
9683
9684 Label try_w;
9685 __ Bind(&try_w);
9686 __ Ldxr(w0, MemOperand(x10));
9687 __ Stxr(w5, w0, MemOperand(x10));
9688 __ Sub(x11, x11, 1);
9689 __ Cbz(x11, &done);
9690 __ Cbz(w5, &try_w);
9691
9692 Label try_x;
9693 __ Bind(&try_x);
9694 __ Ldxr(x0, MemOperand(x10));
9695 __ Stxr(w5, x0, MemOperand(x10));
9696 __ Sub(x11, x11, 1);
9697 __ Cbz(x11, &done);
9698 __ Cbz(w5, &try_x);
9699
9700 Label try_wp;
9701 __ Bind(&try_wp);
9702 __ Ldxp(w0, w1, MemOperand(x10));
9703 __ Stxp(w5, w0, w1, MemOperand(x10));
9704 __ Sub(x11, x11, 1);
9705 __ Cbz(x11, &done);
9706 __ Cbz(w5, &try_wp);
9707
9708 Label try_xp;
9709 __ Bind(&try_xp);
9710 __ Ldxp(x0, x1, MemOperand(x10));
9711 __ Stxp(w5, x0, x1, MemOperand(x10));
9712 __ Sub(x11, x11, 1);
9713 __ Cbz(x11, &done);
9714 __ Cbz(w5, &try_xp);
9715
9716 __ Bind(&done);
9717 // Trigger an error if x11 (watchdog) is zero.
9718 __ Cmp(x11, 0);
9719 __ Cset(x12, eq);
9720
9721 END();
9722 if (CAN_RUN()) {
9723 RUN();
9724
9725 // Check that the watchdog counter didn't run out.
9726 ASSERT_EQUAL_64(0, x12);
9727 }
9728 }
9729 #endif
9730
9731
9732 #ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
9733 // Check that the simulator occasionally makes store-exclusive operations fail.
9734 TEST(ldaxr_stlxr_fail) {
9735 uint64_t data[] = {0, 0, 0};
9736 uint64_t* data_aligned = AlignUp(data, kXRegSizeInBytes * 2);
9737
9738 // Impose a hard limit on the number of attempts, so the test cannot hang.
9739 static const uint64_t kWatchdog = 10000;
9740 Label done;
9741
9742 SETUP();
9743 START();
9744
9745 __ Mov(x10, reinterpret_cast<uintptr_t>(data_aligned));
9746 __ Mov(x11, kWatchdog);
9747
9748 // This loop is the opposite of what we normally do with ldxr and stxr; we
9749 // keep trying until we fail (or the watchdog counter runs out).
9750 Label try_b;
9751 __ Bind(&try_b);
9752 __ Ldaxrb(w0, MemOperand(x10));
9753 __ Stlxrb(w5, w0, MemOperand(x10));
9754 // Check the watchdog counter.
9755 __ Sub(x11, x11, 1);
9756 __ Cbz(x11, &done);
9757 // Check the exclusive-store result.
9758 __ Cbz(w5, &try_b);
9759
9760 Label try_h;
9761 __ Bind(&try_h);
9762 __ Ldaxrh(w0, MemOperand(x10));
9763 __ Stlxrh(w5, w0, MemOperand(x10));
9764 __ Sub(x11, x11, 1);
9765 __ Cbz(x11, &done);
9766 __ Cbz(w5, &try_h);
9767
9768 Label try_w;
9769 __ Bind(&try_w);
9770 __ Ldaxr(w0, MemOperand(x10));
9771 __ Stlxr(w5, w0, MemOperand(x10));
9772 __ Sub(x11, x11, 1);
9773 __ Cbz(x11, &done);
9774 __ Cbz(w5, &try_w);
9775
9776 Label try_x;
9777 __ Bind(&try_x);
9778 __ Ldaxr(x0, MemOperand(x10));
9779 __ Stlxr(w5, x0, MemOperand(x10));
9780 __ Sub(x11, x11, 1);
9781 __ Cbz(x11, &done);
9782 __ Cbz(w5, &try_x);
9783
9784 Label try_wp;
9785 __ Bind(&try_wp);
9786 __ Ldaxp(w0, w1, MemOperand(x10));
9787 __ Stlxp(w5, w0, w1, MemOperand(x10));
9788 __ Sub(x11, x11, 1);
9789 __ Cbz(x11, &done);
9790 __ Cbz(w5, &try_wp);
9791
9792 Label try_xp;
9793 __ Bind(&try_xp);
9794 __ Ldaxp(x0, x1, MemOperand(x10));
9795 __ Stlxp(w5, x0, x1, MemOperand(x10));
9796 __ Sub(x11, x11, 1);
9797 __ Cbz(x11, &done);
9798 __ Cbz(w5, &try_xp);
9799
9800 __ Bind(&done);
9801 // Trigger an error if x11 (watchdog) is zero.
9802 __ Cmp(x11, 0);
9803 __ Cset(x12, eq);
9804
9805 END();
9806 if (CAN_RUN()) {
9807 RUN();
9808
9809 // Check that the watchdog counter didn't run out.
9810 ASSERT_EQUAL_64(0, x12);
9811 }
9812 }
9813 #endif
9814
9815 TEST(cas_casa_casl_casal_w) {
9816 uint64_t data1 = 0x0123456789abcdef;
9817 uint64_t data2 = 0x0123456789abcdef;
9818 uint64_t data3 = 0x0123456789abcdef;
9819 uint64_t data4 = 0x0123456789abcdef;
9820 uint64_t data5 = 0x0123456789abcdef;
9821 uint64_t data6 = 0x0123456789abcdef;
9822 uint64_t data7 = 0x0123456789abcdef;
9823 uint64_t data8 = 0x0123456789abcdef;
9824
9825 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
9826
9827 START();
9828
9829 __ Mov(x21, reinterpret_cast<uintptr_t>(&data1) + 0);
9830 __ Mov(x22, reinterpret_cast<uintptr_t>(&data2) + 0);
9831 __ Mov(x23, reinterpret_cast<uintptr_t>(&data3) + 4);
9832 __ Mov(x24, reinterpret_cast<uintptr_t>(&data4) + 4);
9833 __ Mov(x25, reinterpret_cast<uintptr_t>(&data5) + 0);
9834 __ Mov(x26, reinterpret_cast<uintptr_t>(&data6) + 0);
9835 __ Mov(x27, reinterpret_cast<uintptr_t>(&data7) + 4);
9836 __ Mov(x28, reinterpret_cast<uintptr_t>(&data8) + 4);
9837
9838 __ Mov(x0, 0xffffffff);
9839
9840 __ Mov(x1, 0xfedcba9876543210);
9841 __ Mov(x2, 0x0123456789abcdef);
9842 __ Mov(x3, 0xfedcba9876543210);
9843 __ Mov(x4, 0x89abcdef01234567);
9844 __ Mov(x5, 0xfedcba9876543210);
9845 __ Mov(x6, 0x0123456789abcdef);
9846 __ Mov(x7, 0xfedcba9876543210);
9847 __ Mov(x8, 0x89abcdef01234567);
9848
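  // The odd-numbered comparison values above deliberately differ from the
  // words in memory, so those CAS instructions fail and simply load the old
  // value; the even-numbered ones match, so they store 0xffffffff and load
  // the old value as well.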
9849 __ Cas(w1, w0, MemOperand(x21));
9850 __ Cas(w2, w0, MemOperand(x22));
9851 __ Casa(w3, w0, MemOperand(x23));
9852 __ Casa(w4, w0, MemOperand(x24));
9853 __ Casl(w5, w0, MemOperand(x25));
9854 __ Casl(w6, w0, MemOperand(x26));
9855 __ Casal(w7, w0, MemOperand(x27));
9856 __ Casal(w8, w0, MemOperand(x28));
9857
9858 END();
9859
9860 if (CAN_RUN()) {
9861 RUN();
9862
9863 ASSERT_EQUAL_64(0x89abcdef, x1);
9864 ASSERT_EQUAL_64(0x89abcdef, x2);
9865 ASSERT_EQUAL_64(0x01234567, x3);
9866 ASSERT_EQUAL_64(0x01234567, x4);
9867 ASSERT_EQUAL_64(0x89abcdef, x5);
9868 ASSERT_EQUAL_64(0x89abcdef, x6);
9869 ASSERT_EQUAL_64(0x01234567, x7);
9870 ASSERT_EQUAL_64(0x01234567, x8);
9871
9872 ASSERT_EQUAL_64(0x0123456789abcdef, data1);
9873 ASSERT_EQUAL_64(0x01234567ffffffff, data2);
9874 ASSERT_EQUAL_64(0x0123456789abcdef, data3);
9875 ASSERT_EQUAL_64(0xffffffff89abcdef, data4);
9876 ASSERT_EQUAL_64(0x0123456789abcdef, data5);
9877 ASSERT_EQUAL_64(0x01234567ffffffff, data6);
9878 ASSERT_EQUAL_64(0x0123456789abcdef, data7);
9879 ASSERT_EQUAL_64(0xffffffff89abcdef, data8);
9880 }
9881 }
9882
9883 TEST(cas_casa_casl_casal_x) {
9884 uint64_t data1 = 0x0123456789abcdef;
9885 uint64_t data2 = 0x0123456789abcdef;
9886 uint64_t data3 = 0x0123456789abcdef;
9887 uint64_t data4 = 0x0123456789abcdef;
9888 uint64_t data5 = 0x0123456789abcdef;
9889 uint64_t data6 = 0x0123456789abcdef;
9890 uint64_t data7 = 0x0123456789abcdef;
9891 uint64_t data8 = 0x0123456789abcdef;
9892
9893 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
9894
9895 START();
9896
9897 __ Mov(x21, reinterpret_cast<uintptr_t>(&data1));
9898 __ Mov(x22, reinterpret_cast<uintptr_t>(&data2));
9899 __ Mov(x23, reinterpret_cast<uintptr_t>(&data3));
9900 __ Mov(x24, reinterpret_cast<uintptr_t>(&data4));
9901 __ Mov(x25, reinterpret_cast<uintptr_t>(&data5));
9902 __ Mov(x26, reinterpret_cast<uintptr_t>(&data6));
9903 __ Mov(x27, reinterpret_cast<uintptr_t>(&data7));
9904 __ Mov(x28, reinterpret_cast<uintptr_t>(&data8));
9905
9906 __ Mov(x0, 0xffffffffffffffff);
9907
9908 __ Mov(x1, 0xfedcba9876543210);
9909 __ Mov(x2, 0x0123456789abcdef);
9910 __ Mov(x3, 0xfedcba9876543210);
9911 __ Mov(x4, 0x0123456789abcdef);
9912 __ Mov(x5, 0xfedcba9876543210);
9913 __ Mov(x6, 0x0123456789abcdef);
9914 __ Mov(x7, 0xfedcba9876543210);
9915 __ Mov(x8, 0x0123456789abcdef);
9916
9917 __ Cas(x1, x0, MemOperand(x21));
9918 __ Cas(x2, x0, MemOperand(x22));
9919 __ Casa(x3, x0, MemOperand(x23));
9920 __ Casa(x4, x0, MemOperand(x24));
9921 __ Casl(x5, x0, MemOperand(x25));
9922 __ Casl(x6, x0, MemOperand(x26));
9923 __ Casal(x7, x0, MemOperand(x27));
9924 __ Casal(x8, x0, MemOperand(x28));
9925
9926 END();
9927
9928 if (CAN_RUN()) {
9929 RUN();
9930
9931 ASSERT_EQUAL_64(0x0123456789abcdef, x1);
9932 ASSERT_EQUAL_64(0x0123456789abcdef, x2);
9933 ASSERT_EQUAL_64(0x0123456789abcdef, x3);
9934 ASSERT_EQUAL_64(0x0123456789abcdef, x4);
9935 ASSERT_EQUAL_64(0x0123456789abcdef, x5);
9936 ASSERT_EQUAL_64(0x0123456789abcdef, x6);
9937 ASSERT_EQUAL_64(0x0123456789abcdef, x7);
9938 ASSERT_EQUAL_64(0x0123456789abcdef, x8);
9939
9940 ASSERT_EQUAL_64(0x0123456789abcdef, data1);
9941 ASSERT_EQUAL_64(0xffffffffffffffff, data2);
9942 ASSERT_EQUAL_64(0x0123456789abcdef, data3);
9943 ASSERT_EQUAL_64(0xffffffffffffffff, data4);
9944 ASSERT_EQUAL_64(0x0123456789abcdef, data5);
9945 ASSERT_EQUAL_64(0xffffffffffffffff, data6);
9946 ASSERT_EQUAL_64(0x0123456789abcdef, data7);
9947 ASSERT_EQUAL_64(0xffffffffffffffff, data8);
9948 }
9949 }
9950
9951 TEST(casb_casab_caslb_casalb) {
9952 uint32_t data1 = 0x01234567;
9953 uint32_t data2 = 0x01234567;
9954 uint32_t data3 = 0x01234567;
9955 uint32_t data4 = 0x01234567;
9956 uint32_t data5 = 0x01234567;
9957 uint32_t data6 = 0x01234567;
9958 uint32_t data7 = 0x01234567;
9959 uint32_t data8 = 0x01234567;
9960
9961 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
9962
9963 START();
9964
9965 __ Mov(x21, reinterpret_cast<uintptr_t>(&data1) + 0);
9966 __ Mov(x22, reinterpret_cast<uintptr_t>(&data2) + 0);
9967 __ Mov(x23, reinterpret_cast<uintptr_t>(&data3) + 1);
9968 __ Mov(x24, reinterpret_cast<uintptr_t>(&data4) + 1);
9969 __ Mov(x25, reinterpret_cast<uintptr_t>(&data5) + 2);
9970 __ Mov(x26, reinterpret_cast<uintptr_t>(&data6) + 2);
9971 __ Mov(x27, reinterpret_cast<uintptr_t>(&data7) + 3);
9972 __ Mov(x28, reinterpret_cast<uintptr_t>(&data8) + 3);
9973
9974 __ Mov(x0, 0xff);
9975
9976 __ Mov(x1, 0x76543210);
9977 __ Mov(x2, 0x01234567);
9978 __ Mov(x3, 0x76543210);
9979 __ Mov(x4, 0x67012345);
9980 __ Mov(x5, 0x76543210);
9981 __ Mov(x6, 0x45670123);
9982 __ Mov(x7, 0x76543210);
9983 __ Mov(x8, 0x23456701);
9984
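  // Each address above selects a different byte lane within its word
  // (offsets +0, +1, +2 and +3). The odd-numbered comparison values do not
  // match the selected byte, so those CAS instructions leave memory
  // unchanged; the even-numbered ones match and store 0xff into that byte.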
9985 __ Casb(w1, w0, MemOperand(x21));
9986 __ Casb(w2, w0, MemOperand(x22));
9987 __ Casab(w3, w0, MemOperand(x23));
9988 __ Casab(w4, w0, MemOperand(x24));
9989 __ Caslb(w5, w0, MemOperand(x25));
9990 __ Caslb(w6, w0, MemOperand(x26));
9991 __ Casalb(w7, w0, MemOperand(x27));
9992 __ Casalb(w8, w0, MemOperand(x28));
9993
9994 END();
9995
9996 if (CAN_RUN()) {
9997 RUN();
9998
9999 ASSERT_EQUAL_64(0x00000067, x1);
10000 ASSERT_EQUAL_64(0x00000067, x2);
10001 ASSERT_EQUAL_64(0x00000045, x3);
10002 ASSERT_EQUAL_64(0x00000045, x4);
10003 ASSERT_EQUAL_64(0x00000023, x5);
10004 ASSERT_EQUAL_64(0x00000023, x6);
10005 ASSERT_EQUAL_64(0x00000001, x7);
10006 ASSERT_EQUAL_64(0x00000001, x8);
10007
10008 ASSERT_EQUAL_64(0x01234567, data1);
10009 ASSERT_EQUAL_64(0x012345ff, data2);
10010 ASSERT_EQUAL_64(0x01234567, data3);
10011 ASSERT_EQUAL_64(0x0123ff67, data4);
10012 ASSERT_EQUAL_64(0x01234567, data5);
10013 ASSERT_EQUAL_64(0x01ff4567, data6);
10014 ASSERT_EQUAL_64(0x01234567, data7);
10015 ASSERT_EQUAL_64(0xff234567, data8);
10016 }
10017 }
10018
10019 TEST(cash_casah_caslh_casalh) {
10020 uint64_t data1 = 0x0123456789abcdef;
10021 uint64_t data2 = 0x0123456789abcdef;
10022 uint64_t data3 = 0x0123456789abcdef;
10023 uint64_t data4 = 0x0123456789abcdef;
10024 uint64_t data5 = 0x0123456789abcdef;
10025 uint64_t data6 = 0x0123456789abcdef;
10026 uint64_t data7 = 0x0123456789abcdef;
10027 uint64_t data8 = 0x0123456789abcdef;
10028
10029 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
10030
10031 START();
10032
10033 __ Mov(x21, reinterpret_cast<uintptr_t>(&data1) + 0);
10034 __ Mov(x22, reinterpret_cast<uintptr_t>(&data2) + 0);
10035 __ Mov(x23, reinterpret_cast<uintptr_t>(&data3) + 2);
10036 __ Mov(x24, reinterpret_cast<uintptr_t>(&data4) + 2);
10037 __ Mov(x25, reinterpret_cast<uintptr_t>(&data5) + 4);
10038 __ Mov(x26, reinterpret_cast<uintptr_t>(&data6) + 4);
10039 __ Mov(x27, reinterpret_cast<uintptr_t>(&data7) + 6);
10040 __ Mov(x28, reinterpret_cast<uintptr_t>(&data8) + 6);
10041
10042 __ Mov(x0, 0xffff);
10043
10044 __ Mov(x1, 0xfedcba9876543210);
10045 __ Mov(x2, 0x0123456789abcdef);
10046 __ Mov(x3, 0xfedcba9876543210);
10047 __ Mov(x4, 0xcdef0123456789ab);
10048 __ Mov(x5, 0xfedcba9876543210);
10049 __ Mov(x6, 0x89abcdef01234567);
10050 __ Mov(x7, 0xfedcba9876543210);
10051 __ Mov(x8, 0x456789abcdef0123);
10052
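  // Halfword CAS follows the same pattern, with each access targeting a
  // different halfword of its doubleword.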
10053 __ Cash(w1, w0, MemOperand(x21));
10054 __ Cash(w2, w0, MemOperand(x22));
10055 __ Casah(w3, w0, MemOperand(x23));
10056 __ Casah(w4, w0, MemOperand(x24));
10057 __ Caslh(w5, w0, MemOperand(x25));
10058 __ Caslh(w6, w0, MemOperand(x26));
10059 __ Casalh(w7, w0, MemOperand(x27));
10060 __ Casalh(w8, w0, MemOperand(x28));
10061
10062 END();
10063
10064 if (CAN_RUN()) {
10065 RUN();
10066
10067 ASSERT_EQUAL_64(0x0000cdef, x1);
10068 ASSERT_EQUAL_64(0x0000cdef, x2);
10069 ASSERT_EQUAL_64(0x000089ab, x3);
10070 ASSERT_EQUAL_64(0x000089ab, x4);
10071 ASSERT_EQUAL_64(0x00004567, x5);
10072 ASSERT_EQUAL_64(0x00004567, x6);
10073 ASSERT_EQUAL_64(0x00000123, x7);
10074 ASSERT_EQUAL_64(0x00000123, x8);
10075
10076 ASSERT_EQUAL_64(0x0123456789abcdef, data1);
10077 ASSERT_EQUAL_64(0x0123456789abffff, data2);
10078 ASSERT_EQUAL_64(0x0123456789abcdef, data3);
10079 ASSERT_EQUAL_64(0x01234567ffffcdef, data4);
10080 ASSERT_EQUAL_64(0x0123456789abcdef, data5);
10081 ASSERT_EQUAL_64(0x0123ffff89abcdef, data6);
10082 ASSERT_EQUAL_64(0x0123456789abcdef, data7);
10083 ASSERT_EQUAL_64(0xffff456789abcdef, data8);
10084 }
10085 }
10086
10087 TEST(casp_caspa_caspl_caspal_w) {
10088 uint64_t data1[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10089 uint64_t data2[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10090 uint64_t data3[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10091 uint64_t data4[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10092 uint64_t data5[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10093 uint64_t data6[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10094 uint64_t data7[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10095 uint64_t data8[] = {0x7766554433221100, 0xffeeddccbbaa9988};
10096
10097 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
10098
10099 START();
10100
10101 __ Mov(x21, reinterpret_cast<uintptr_t>(data1) + 0);
10102 __ Mov(x22, reinterpret_cast<uintptr_t>(data2) + 0);
10103 __ Mov(x23, reinterpret_cast<uintptr_t>(data3) + 8);
10104 __ Mov(x24, reinterpret_cast<uintptr_t>(data4) + 8);
10105 __ Mov(x25, reinterpret_cast<uintptr_t>(data5) + 8);
10106 __ Mov(x26, reinterpret_cast<uintptr_t>(data6) + 8);
10107 __ Mov(x27, reinterpret_cast<uintptr_t>(data7) + 0);
10108 __ Mov(x28, reinterpret_cast<uintptr_t>(data8) + 0);
10109
10110 __ Mov(x0, 0xfff00fff);
10111 __ Mov(x1, 0xfff11fff);
10112
10113 __ Mov(x2, 0x77665544);
10114 __ Mov(x3, 0x33221100);
10115 __ Mov(x4, 0x33221100);
10116 __ Mov(x5, 0x77665544);
10117
10118 __ Mov(x6, 0xffeeddcc);
10119 __ Mov(x7, 0xbbaa9988);
10120 __ Mov(x8, 0xbbaa9988);
10121 __ Mov(x9, 0xffeeddcc);
10122
10123 __ Mov(x10, 0xffeeddcc);
10124 __ Mov(x11, 0xbbaa9988);
10125 __ Mov(x12, 0xbbaa9988);
10126 __ Mov(x13, 0xffeeddcc);
10127
10128 __ Mov(x14, 0x77665544);
10129 __ Mov(x15, 0x33221100);
10130 __ Mov(x16, 0x33221100);
10131 __ Mov(x17, 0x77665544);
10132
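  // CASP compares a register pair with a pair of adjacent words in memory.
  // The first instruction of each pair below presents the expected values in
  // the wrong order (so no store happens); the second presents them in memory
  // order, so the new pair <w0, w1> is stored. The compare registers always
  // receive the old memory contents.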
10133 __ Casp(w2, w3, w0, w1, MemOperand(x21));
10134 __ Casp(w4, w5, w0, w1, MemOperand(x22));
10135 __ Caspa(w6, w7, w0, w1, MemOperand(x23));
10136 __ Caspa(w8, w9, w0, w1, MemOperand(x24));
10137 __ Caspl(w10, w11, w0, w1, MemOperand(x25));
10138 __ Caspl(w12, w13, w0, w1, MemOperand(x26));
10139 __ Caspal(w14, w15, w0, w1, MemOperand(x27));
10140 __ Caspal(w16, w17, w0, w1, MemOperand(x28));
10141
10142 END();
10143
10144 if (CAN_RUN()) {
10145 RUN();
10146
10147 ASSERT_EQUAL_64(0x33221100, x2);
10148 ASSERT_EQUAL_64(0x77665544, x3);
10149 ASSERT_EQUAL_64(0x33221100, x4);
10150 ASSERT_EQUAL_64(0x77665544, x5);
10151 ASSERT_EQUAL_64(0xbbaa9988, x6);
10152 ASSERT_EQUAL_64(0xffeeddcc, x7);
10153 ASSERT_EQUAL_64(0xbbaa9988, x8);
10154 ASSERT_EQUAL_64(0xffeeddcc, x9);
10155 ASSERT_EQUAL_64(0xbbaa9988, x10);
10156 ASSERT_EQUAL_64(0xffeeddcc, x11);
10157 ASSERT_EQUAL_64(0xbbaa9988, x12);
10158 ASSERT_EQUAL_64(0xffeeddcc, x13);
10159 ASSERT_EQUAL_64(0x33221100, x14);
10160 ASSERT_EQUAL_64(0x77665544, x15);
10161 ASSERT_EQUAL_64(0x33221100, x16);
10162 ASSERT_EQUAL_64(0x77665544, x17);
10163
10164 ASSERT_EQUAL_64(0x7766554433221100, data1[0]);
10165 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data1[1]);
10166 ASSERT_EQUAL_64(0xfff11ffffff00fff, data2[0]);
10167 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data2[1]);
10168 ASSERT_EQUAL_64(0x7766554433221100, data3[0]);
10169 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data3[1]);
10170 ASSERT_EQUAL_64(0x7766554433221100, data4[0]);
10171 ASSERT_EQUAL_64(0xfff11ffffff00fff, data4[1]);
10172 ASSERT_EQUAL_64(0x7766554433221100, data5[0]);
10173 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data5[1]);
10174 ASSERT_EQUAL_64(0x7766554433221100, data6[0]);
10175 ASSERT_EQUAL_64(0xfff11ffffff00fff, data6[1]);
10176 ASSERT_EQUAL_64(0x7766554433221100, data7[0]);
10177 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data7[1]);
10178 ASSERT_EQUAL_64(0xfff11ffffff00fff, data8[0]);
10179 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data8[1]);
10180 }
10181 }
10182
10183 TEST(casp_caspa_caspl_caspal_x) {
10184 alignas(kXRegSizeInBytes * 2) uint64_t data1[] = {0x7766554433221100,
10185 0xffeeddccbbaa9988,
10186 0xfedcba9876543210,
10187 0x0123456789abcdef};
10188 alignas(kXRegSizeInBytes * 2) uint64_t data2[] = {0x7766554433221100,
10189 0xffeeddccbbaa9988,
10190 0xfedcba9876543210,
10191 0x0123456789abcdef};
10192 alignas(kXRegSizeInBytes * 2) uint64_t data3[] = {0x7766554433221100,
10193 0xffeeddccbbaa9988,
10194 0xfedcba9876543210,
10195 0x0123456789abcdef};
10196 alignas(kXRegSizeInBytes * 2) uint64_t data4[] = {0x7766554433221100,
10197 0xffeeddccbbaa9988,
10198 0xfedcba9876543210,
10199 0x0123456789abcdef};
10200 alignas(kXRegSizeInBytes * 2) uint64_t data5[] = {0x7766554433221100,
10201 0xffeeddccbbaa9988,
10202 0xfedcba9876543210,
10203 0x0123456789abcdef};
10204 alignas(kXRegSizeInBytes * 2) uint64_t data6[] = {0x7766554433221100,
10205 0xffeeddccbbaa9988,
10206 0xfedcba9876543210,
10207 0x0123456789abcdef};
10208 alignas(kXRegSizeInBytes * 2) uint64_t data7[] = {0x7766554433221100,
10209 0xffeeddccbbaa9988,
10210 0xfedcba9876543210,
10211 0x0123456789abcdef};
10212 alignas(kXRegSizeInBytes * 2) uint64_t data8[] = {0x7766554433221100,
10213 0xffeeddccbbaa9988,
10214 0xfedcba9876543210,
10215 0x0123456789abcdef};
10216
10217 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
10218
10219 START();
10220
10221 __ Mov(x21, reinterpret_cast<uintptr_t>(data1) + 0);
10222 __ Mov(x22, reinterpret_cast<uintptr_t>(data2) + 0);
10223 __ Mov(x23, reinterpret_cast<uintptr_t>(data3) + 16);
10224 __ Mov(x24, reinterpret_cast<uintptr_t>(data4) + 16);
10225 __ Mov(x25, reinterpret_cast<uintptr_t>(data5) + 16);
10226 __ Mov(x26, reinterpret_cast<uintptr_t>(data6) + 16);
10227 __ Mov(x27, reinterpret_cast<uintptr_t>(data7) + 0);
10228 __ Mov(x28, reinterpret_cast<uintptr_t>(data8) + 0);
10229
10230 __ Mov(x0, 0xfffffff00fffffff);
10231 __ Mov(x1, 0xfffffff11fffffff);
10232
10233 __ Mov(x2, 0xffeeddccbbaa9988);
10234 __ Mov(x3, 0x7766554433221100);
10235 __ Mov(x4, 0x7766554433221100);
10236 __ Mov(x5, 0xffeeddccbbaa9988);
10237
10238 __ Mov(x6, 0x0123456789abcdef);
10239 __ Mov(x7, 0xfedcba9876543210);
10240 __ Mov(x8, 0xfedcba9876543210);
10241 __ Mov(x9, 0x0123456789abcdef);
10242
10243 __ Mov(x10, 0x0123456789abcdef);
10244 __ Mov(x11, 0xfedcba9876543210);
10245 __ Mov(x12, 0xfedcba9876543210);
10246 __ Mov(x13, 0x0123456789abcdef);
10247
10248 __ Mov(x14, 0xffeeddccbbaa9988);
10249 __ Mov(x15, 0x7766554433221100);
10250 __ Mov(x16, 0x7766554433221100);
10251 __ Mov(x17, 0xffeeddccbbaa9988);
10252
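  // As for the W-register test above, but with X register pairs; the data
  // arrays are 16-byte aligned because CASP requires the address to be
  // aligned to the size of the whole pair.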
10253 __ Casp(x2, x3, x0, x1, MemOperand(x21));
10254 __ Casp(x4, x5, x0, x1, MemOperand(x22));
10255 __ Caspa(x6, x7, x0, x1, MemOperand(x23));
10256 __ Caspa(x8, x9, x0, x1, MemOperand(x24));
10257 __ Caspl(x10, x11, x0, x1, MemOperand(x25));
10258 __ Caspl(x12, x13, x0, x1, MemOperand(x26));
10259 __ Caspal(x14, x15, x0, x1, MemOperand(x27));
10260 __ Caspal(x16, x17, x0, x1, MemOperand(x28));
10261
10262 END();
10263
10264 if (CAN_RUN()) {
10265 RUN();
10266
10267 ASSERT_EQUAL_64(0x7766554433221100, x2);
10268 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x3);
10269 ASSERT_EQUAL_64(0x7766554433221100, x4);
10270 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x5);
10271
10272 ASSERT_EQUAL_64(0xfedcba9876543210, x6);
10273 ASSERT_EQUAL_64(0x0123456789abcdef, x7);
10274 ASSERT_EQUAL_64(0xfedcba9876543210, x8);
10275 ASSERT_EQUAL_64(0x0123456789abcdef, x9);
10276
10277 ASSERT_EQUAL_64(0xfedcba9876543210, x10);
10278 ASSERT_EQUAL_64(0x0123456789abcdef, x11);
10279 ASSERT_EQUAL_64(0xfedcba9876543210, x12);
10280 ASSERT_EQUAL_64(0x0123456789abcdef, x13);
10281
10282 ASSERT_EQUAL_64(0x7766554433221100, x14);
10283 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x15);
10284 ASSERT_EQUAL_64(0x7766554433221100, x16);
10285 ASSERT_EQUAL_64(0xffeeddccbbaa9988, x17);
10286
10287 ASSERT_EQUAL_64(0x7766554433221100, data1[0]);
10288 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data1[1]);
10289 ASSERT_EQUAL_64(0xfedcba9876543210, data1[2]);
10290 ASSERT_EQUAL_64(0x0123456789abcdef, data1[3]);
10291
10292 ASSERT_EQUAL_64(0xfffffff00fffffff, data2[0]);
10293 ASSERT_EQUAL_64(0xfffffff11fffffff, data2[1]);
10294 ASSERT_EQUAL_64(0xfedcba9876543210, data2[2]);
10295 ASSERT_EQUAL_64(0x0123456789abcdef, data2[3]);
10296
10297 ASSERT_EQUAL_64(0x7766554433221100, data3[0]);
10298 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data3[1]);
10299 ASSERT_EQUAL_64(0xfedcba9876543210, data3[2]);
10300 ASSERT_EQUAL_64(0x0123456789abcdef, data3[3]);
10301
10302 ASSERT_EQUAL_64(0x7766554433221100, data4[0]);
10303 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data4[1]);
10304 ASSERT_EQUAL_64(0xfffffff00fffffff, data4[2]);
10305 ASSERT_EQUAL_64(0xfffffff11fffffff, data4[3]);
10306
10307 ASSERT_EQUAL_64(0x7766554433221100, data5[0]);
10308 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data5[1]);
10309 ASSERT_EQUAL_64(0xfedcba9876543210, data5[2]);
10310 ASSERT_EQUAL_64(0x0123456789abcdef, data5[3]);
10311
10312 ASSERT_EQUAL_64(0x7766554433221100, data6[0]);
10313 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data6[1]);
10314 ASSERT_EQUAL_64(0xfffffff00fffffff, data6[2]);
10315 ASSERT_EQUAL_64(0xfffffff11fffffff, data6[3]);
10316
10317 ASSERT_EQUAL_64(0x7766554433221100, data7[0]);
10318 ASSERT_EQUAL_64(0xffeeddccbbaa9988, data7[1]);
10319 ASSERT_EQUAL_64(0xfedcba9876543210, data7[2]);
10320 ASSERT_EQUAL_64(0x0123456789abcdef, data7[3]);
10321
10322 ASSERT_EQUAL_64(0xfffffff00fffffff, data8[0]);
10323 ASSERT_EQUAL_64(0xfffffff11fffffff, data8[1]);
10324 ASSERT_EQUAL_64(0xfedcba9876543210, data8[2]);
10325 ASSERT_EQUAL_64(0x0123456789abcdef, data8[3]);
10326 }
10327 }
10328
10329
10330 typedef void (MacroAssembler::*AtomicMemoryLoadSignature)(
10331 const Register& rs, const Register& rt, const MemOperand& src);
10332 typedef void (MacroAssembler::*AtomicMemoryStoreSignature)(
10333 const Register& rs, const MemOperand& src);
10334
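// Apply each of the four forms (plain, acquire, release, acquire-release) of
// an atomic memory load-op instruction to separate copies of arg2, using arg1
// as the operand, and optionally the two store forms as well. Each destination
// register should receive the old memory value (masked to the access size),
// and memory should hold `expected` merged into arg2 under result_mask.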
10335 void AtomicMemoryWHelper(AtomicMemoryLoadSignature* load_funcs,
10336 AtomicMemoryStoreSignature* store_funcs,
10337 uint64_t arg1,
10338 uint64_t arg2,
10339 uint64_t expected,
10340 uint64_t result_mask) {
10341 uint64_t data0[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10342 uint64_t data1[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10343 uint64_t data2[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10344 uint64_t data3[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10345 uint64_t data4[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10346 uint64_t data5[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10347
10348 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
10349 START();
10350
10351 __ Mov(x20, reinterpret_cast<uintptr_t>(data0));
10352 __ Mov(x21, reinterpret_cast<uintptr_t>(data1));
10353 __ Mov(x22, reinterpret_cast<uintptr_t>(data2));
10354 __ Mov(x23, reinterpret_cast<uintptr_t>(data3));
10355
10356 __ Mov(x0, arg1);
10357 __ Mov(x1, arg1);
10358 __ Mov(x2, arg1);
10359 __ Mov(x3, arg1);
10360
10361 (masm.*(load_funcs[0]))(w0, w10, MemOperand(x20));
10362 (masm.*(load_funcs[1]))(w1, w11, MemOperand(x21));
10363 (masm.*(load_funcs[2]))(w2, w12, MemOperand(x22));
10364 (masm.*(load_funcs[3]))(w3, w13, MemOperand(x23));
10365
10366 if (store_funcs != NULL) {
10367 __ Mov(x24, reinterpret_cast<uintptr_t>(data4));
10368 __ Mov(x25, reinterpret_cast<uintptr_t>(data5));
10369 __ Mov(x4, arg1);
10370 __ Mov(x5, arg1);
10371
10372 (masm.*(store_funcs[0]))(w4, MemOperand(x24));
10373 (masm.*(store_funcs[1]))(w5, MemOperand(x25));
10374 }
10375
10376 END();
10377
10378 if (CAN_RUN()) {
10379 RUN();
10380
10381 uint64_t stored_value = arg2 & result_mask;
10382 ASSERT_EQUAL_64(stored_value, x10);
10383 ASSERT_EQUAL_64(stored_value, x11);
10384 ASSERT_EQUAL_64(stored_value, x12);
10385 ASSERT_EQUAL_64(stored_value, x13);
10386
10387     // The data fields already contain arg2, so only the bits selected by
10388     // result_mask are overwritten.
10389 uint64_t final_expected = (arg2 & ~result_mask) | (expected & result_mask);
10390 ASSERT_EQUAL_64(final_expected, data0[0]);
10391 ASSERT_EQUAL_64(final_expected, data1[0]);
10392 ASSERT_EQUAL_64(final_expected, data2[0]);
10393 ASSERT_EQUAL_64(final_expected, data3[0]);
10394
10395 if (store_funcs != NULL) {
10396 ASSERT_EQUAL_64(final_expected, data4[0]);
10397 ASSERT_EQUAL_64(final_expected, data5[0]);
10398 }
10399 }
10400 }
10401
10402 void AtomicMemoryXHelper(AtomicMemoryLoadSignature* load_funcs,
10403 AtomicMemoryStoreSignature* store_funcs,
10404 uint64_t arg1,
10405 uint64_t arg2,
10406 uint64_t expected) {
10407 uint64_t data0[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10408 uint64_t data1[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10409 uint64_t data2[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10410 uint64_t data3[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10411 uint64_t data4[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10412 uint64_t data5[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {arg2, 0};
10413
10414 SETUP_WITH_FEATURES(CPUFeatures::kAtomics);
10415 START();
10416
10417 __ Mov(x20, reinterpret_cast<uintptr_t>(data0));
10418 __ Mov(x21, reinterpret_cast<uintptr_t>(data1));
10419 __ Mov(x22, reinterpret_cast<uintptr_t>(data2));
10420 __ Mov(x23, reinterpret_cast<uintptr_t>(data3));
10421
10422 __ Mov(x0, arg1);
10423 __ Mov(x1, arg1);
10424 __ Mov(x2, arg1);
10425 __ Mov(x3, arg1);
10426
10427 (masm.*(load_funcs[0]))(x0, x10, MemOperand(x20));
10428 (masm.*(load_funcs[1]))(x1, x11, MemOperand(x21));
10429 (masm.*(load_funcs[2]))(x2, x12, MemOperand(x22));
10430 (masm.*(load_funcs[3]))(x3, x13, MemOperand(x23));
10431
10432 if (store_funcs != NULL) {
10433 __ Mov(x24, reinterpret_cast<uintptr_t>(data4));
10434 __ Mov(x25, reinterpret_cast<uintptr_t>(data5));
10435 __ Mov(x4, arg1);
10436 __ Mov(x5, arg1);
10437
10438 (masm.*(store_funcs[0]))(x4, MemOperand(x24));
10439 (masm.*(store_funcs[1]))(x5, MemOperand(x25));
10440 }
10441
10442 END();
10443
10444 if (CAN_RUN()) {
10445 RUN();
10446
10447 ASSERT_EQUAL_64(arg2, x10);
10448 ASSERT_EQUAL_64(arg2, x11);
10449 ASSERT_EQUAL_64(arg2, x12);
10450 ASSERT_EQUAL_64(arg2, x13);
10451
10452 ASSERT_EQUAL_64(expected, data0[0]);
10453 ASSERT_EQUAL_64(expected, data1[0]);
10454 ASSERT_EQUAL_64(expected, data2[0]);
10455 ASSERT_EQUAL_64(expected, data3[0]);
10456
10457 if (store_funcs != NULL) {
10458 ASSERT_EQUAL_64(expected, data4[0]);
10459 ASSERT_EQUAL_64(expected, data5[0]);
10460 }
10461 }
10462 }
10463
10464 // clang-format off
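// Initializer lists of MacroAssembler member-function pointers covering the
// four load orderings (plain, acquire, release, acquire-release) and the two
// store orderings (plain, release) of each atomic memory operation.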
10465 #define MAKE_LOADS(NAME) \
10466 {&MacroAssembler::Ld##NAME, \
10467 &MacroAssembler::Ld##NAME##a, \
10468 &MacroAssembler::Ld##NAME##l, \
10469 &MacroAssembler::Ld##NAME##al}
10470 #define MAKE_STORES(NAME) \
10471 {&MacroAssembler::St##NAME, &MacroAssembler::St##NAME##l}
10472
10473 #define MAKE_B_LOADS(NAME) \
10474 {&MacroAssembler::Ld##NAME##b, \
10475 &MacroAssembler::Ld##NAME##ab, \
10476 &MacroAssembler::Ld##NAME##lb, \
10477 &MacroAssembler::Ld##NAME##alb}
10478 #define MAKE_B_STORES(NAME) \
10479 {&MacroAssembler::St##NAME##b, &MacroAssembler::St##NAME##lb}
10480
10481 #define MAKE_H_LOADS(NAME) \
10482 {&MacroAssembler::Ld##NAME##h, \
10483 &MacroAssembler::Ld##NAME##ah, \
10484 &MacroAssembler::Ld##NAME##lh, \
10485 &MacroAssembler::Ld##NAME##alh}
10486 #define MAKE_H_STORES(NAME) \
10487 {&MacroAssembler::St##NAME##h, &MacroAssembler::St##NAME##lh}
10488 // clang-format on
10489
10490 TEST(atomic_memory_add) {
10491 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(add);
10492 AtomicMemoryStoreSignature stores[] = MAKE_STORES(add);
10493 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(add);
10494 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(add);
10495 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(add);
10496 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(add);
10497
10498 // The arguments are chosen to have two useful properties:
10499 // * When multiplied by small values (such as a register index), this value
10500 // is clearly readable in the result.
10501 // * The value is not formed from repeating fixed-size smaller values, so it
10502 // can be used to detect endianness-related errors.
10503 uint64_t arg1 = 0x0100001000100101;
10504 uint64_t arg2 = 0x0200002000200202;
10505 uint64_t expected = arg1 + arg2;
10506
10507 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10508 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10509 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10510 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10511 }
10512
10513 TEST(atomic_memory_clr) {
10514 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(clr);
10515 AtomicMemoryStoreSignature stores[] = MAKE_STORES(clr);
10516 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(clr);
10517 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(clr);
10518 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(clr);
10519 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(clr);
10520
10521 uint64_t arg1 = 0x0300003000300303;
10522 uint64_t arg2 = 0x0500005000500505;
10523 uint64_t expected = arg2 & ~arg1;
10524
10525 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10526 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10527 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10528 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10529 }
10530
10531 TEST(atomic_memory_eor) {
10532 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(eor);
10533 AtomicMemoryStoreSignature stores[] = MAKE_STORES(eor);
10534 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(eor);
10535 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(eor);
10536 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(eor);
10537 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(eor);
10538
10539 uint64_t arg1 = 0x0300003000300303;
10540 uint64_t arg2 = 0x0500005000500505;
10541 uint64_t expected = arg1 ^ arg2;
10542
10543 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10544 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10545 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10546 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10547 }
10548
10549 TEST(atomic_memory_set) {
10550 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(set);
10551 AtomicMemoryStoreSignature stores[] = MAKE_STORES(set);
10552 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(set);
10553 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(set);
10554 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(set);
10555 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(set);
10556
10557 uint64_t arg1 = 0x0300003000300303;
10558 uint64_t arg2 = 0x0500005000500505;
10559 uint64_t expected = arg1 | arg2;
10560
10561 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10562 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10563 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10564 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10565 }
10566
10567 TEST(atomic_memory_smax) {
10568 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(smax);
10569 AtomicMemoryStoreSignature stores[] = MAKE_STORES(smax);
10570 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(smax);
10571 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(smax);
10572 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(smax);
10573 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(smax);
10574
10575 uint64_t arg1 = 0x8100000080108181;
10576 uint64_t arg2 = 0x0100001000100101;
10577 uint64_t expected = 0x0100001000100101;
10578
10579 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10580 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10581 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10582 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10583 }
10584
10585 TEST(atomic_memory_smin) {
10586 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(smin);
10587 AtomicMemoryStoreSignature stores[] = MAKE_STORES(smin);
10588 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(smin);
10589 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(smin);
10590 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(smin);
10591 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(smin);
10592
10593 uint64_t arg1 = 0x8100000080108181;
10594 uint64_t arg2 = 0x0100001000100101;
10595 uint64_t expected = 0x8100000080108181;
10596
10597 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10598 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10599 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10600 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10601 }
10602
10603 TEST(atomic_memory_umax) {
10604 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(umax);
10605 AtomicMemoryStoreSignature stores[] = MAKE_STORES(umax);
10606 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(umax);
10607 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(umax);
10608 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(umax);
10609 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(umax);
10610
10611 uint64_t arg1 = 0x8100000080108181;
10612 uint64_t arg2 = 0x0100001000100101;
10613 uint64_t expected = 0x8100000080108181;
10614
10615 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10616 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10617 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10618 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10619 }
10620
10621 TEST(atomic_memory_umin) {
10622 AtomicMemoryLoadSignature loads[] = MAKE_LOADS(umin);
10623 AtomicMemoryStoreSignature stores[] = MAKE_STORES(umin);
10624 AtomicMemoryLoadSignature b_loads[] = MAKE_B_LOADS(umin);
10625 AtomicMemoryStoreSignature b_stores[] = MAKE_B_STORES(umin);
10626 AtomicMemoryLoadSignature h_loads[] = MAKE_H_LOADS(umin);
10627 AtomicMemoryStoreSignature h_stores[] = MAKE_H_STORES(umin);
10628
10629 uint64_t arg1 = 0x8100000080108181;
10630 uint64_t arg2 = 0x0100001000100101;
10631 uint64_t expected = 0x0100001000100101;
10632
10633 AtomicMemoryWHelper(b_loads, b_stores, arg1, arg2, expected, kByteMask);
10634 AtomicMemoryWHelper(h_loads, h_stores, arg1, arg2, expected, kHalfWordMask);
10635 AtomicMemoryWHelper(loads, stores, arg1, arg2, expected, kWordMask);
10636 AtomicMemoryXHelper(loads, stores, arg1, arg2, expected);
10637 }
10638
10639 TEST(atomic_memory_swp) {
10640 AtomicMemoryLoadSignature loads[] = {&MacroAssembler::Swp,
10641 &MacroAssembler::Swpa,
10642 &MacroAssembler::Swpl,
10643 &MacroAssembler::Swpal};
10644 AtomicMemoryLoadSignature b_loads[] = {&MacroAssembler::Swpb,
10645 &MacroAssembler::Swpab,
10646 &MacroAssembler::Swplb,
10647 &MacroAssembler::Swpalb};
10648 AtomicMemoryLoadSignature h_loads[] = {&MacroAssembler::Swph,
10649 &MacroAssembler::Swpah,
10650 &MacroAssembler::Swplh,
10651 &MacroAssembler::Swpalh};
10652
10653 uint64_t arg1 = 0x0100001000100101;
10654 uint64_t arg2 = 0x0200002000200202;
10655 uint64_t expected = 0x0100001000100101;
10656
10657   // The SWP functions have signatures equivalent to the atomic memory LD
10658   // functions, so we can use the same helpers, just without the ST aliases.
10659 AtomicMemoryWHelper(b_loads, NULL, arg1, arg2, expected, kByteMask);
10660 AtomicMemoryWHelper(h_loads, NULL, arg1, arg2, expected, kHalfWordMask);
10661 AtomicMemoryWHelper(loads, NULL, arg1, arg2, expected, kWordMask);
10662 AtomicMemoryXHelper(loads, NULL, arg1, arg2, expected);
10663 }
10664
10665
10666 TEST(ldaprb_ldaprh_ldapr) {
10667 uint64_t data0[] = {0x1010101010101010, 0x1010101010101010};
10668 uint64_t data1[] = {0x1010101010101010, 0x1010101010101010};
10669 uint64_t data2[] = {0x1010101010101010, 0x1010101010101010};
10670 uint64_t data3[] = {0x1010101010101010, 0x1010101010101010};
10671
10672 uint64_t* data0_aligned = AlignUp(data0, kXRegSizeInBytes * 2);
10673 uint64_t* data1_aligned = AlignUp(data1, kXRegSizeInBytes * 2);
10674 uint64_t* data2_aligned = AlignUp(data2, kXRegSizeInBytes * 2);
10675 uint64_t* data3_aligned = AlignUp(data3, kXRegSizeInBytes * 2);
10676
10677 SETUP_WITH_FEATURES(CPUFeatures::kRCpc);
10678 START();
10679
10680 __ Mov(x20, reinterpret_cast<uintptr_t>(data0_aligned));
10681 __ Mov(x21, reinterpret_cast<uintptr_t>(data1_aligned));
10682 __ Mov(x22, reinterpret_cast<uintptr_t>(data2_aligned));
10683 __ Mov(x23, reinterpret_cast<uintptr_t>(data3_aligned));
10684
10685 __ Ldaprb(w0, MemOperand(x20));
10686 __ Ldaprh(w1, MemOperand(x21));
10687 __ Ldapr(w2, MemOperand(x22));
10688 __ Ldapr(x3, MemOperand(x23));
10689
10690 END();
10691
10692 if (CAN_RUN()) {
10693 RUN();
10694 ASSERT_EQUAL_64(0x10, x0);
10695 ASSERT_EQUAL_64(0x1010, x1);
10696 ASSERT_EQUAL_64(0x10101010, x2);
10697 ASSERT_EQUAL_64(0x1010101010101010, x3);
10698 }
10699 }
10700
10701
10702 TEST(ldapurb_ldapurh_ldapur) {
10703 uint64_t data[]
10704 __attribute__((aligned(kXRegSizeInBytes * 2))) = {0x0123456789abcdef,
10705 0xfedcba9876543210};
10706
10707 uintptr_t data_base = reinterpret_cast<uintptr_t>(data);
10708
10709 SETUP_WITH_FEATURES(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm);
10710 START();
10711
10712 __ Mov(x20, data_base);
10713 __ Mov(x21, data_base + 2 * sizeof(data[0]));
10714
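  // Non-zero immediate offsets make the MacroAssembler emit the unscaled
  // LDAPURB/LDAPURH/LDAPUR encodings. Some of the offsets make the access
  // straddle the boundary between data[0] and data[1].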
10715 __ Ldaprb(w0, MemOperand(x20));
10716 __ Ldaprh(w1, MemOperand(x20));
10717 __ Ldapr(w2, MemOperand(x20));
10718 __ Ldapr(x3, MemOperand(x20));
10719 __ Ldaprb(w4, MemOperand(x20, 12));
10720 __ Ldaprh(w5, MemOperand(x20, 8));
10721 __ Ldapr(w6, MemOperand(x20, 10));
10722 __ Ldapr(x7, MemOperand(x20, 7));
10723 __ Ldaprb(w8, MemOperand(x21, -1));
10724 __ Ldaprh(w9, MemOperand(x21, -3));
10725 __ Ldapr(w10, MemOperand(x21, -9));
10726 __ Ldapr(x11, MemOperand(x21, -12));
10727
10728 END();
10729
10730 if (CAN_RUN()) {
10731 RUN();
10732 ASSERT_EQUAL_64(0xef, x0);
10733 ASSERT_EQUAL_64(0xcdef, x1);
10734 ASSERT_EQUAL_64(0x89abcdef, x2);
10735 ASSERT_EQUAL_64(0x0123456789abcdef, x3);
10736 ASSERT_EQUAL_64(0x98, x4);
10737 ASSERT_EQUAL_64(0x3210, x5);
10738 ASSERT_EQUAL_64(0xba987654, x6);
10739 ASSERT_EQUAL_64(0xdcba987654321001, x7);
10740 ASSERT_EQUAL_64(0xfe, x8);
10741 ASSERT_EQUAL_64(0xdcba, x9);
10742 ASSERT_EQUAL_64(0x54321001, x10);
10743 ASSERT_EQUAL_64(0x7654321001234567, x11);
10744 }
10745 }
10746
10747
10748 TEST(ldapursb_ldapursh_ldapursw) {
10749 uint64_t data[]
10750 __attribute__((aligned(kXRegSizeInBytes * 2))) = {0x0123456789abcdef,
10751 0xfedcba9876543210};
10752
10753 uintptr_t data_base = reinterpret_cast<uintptr_t>(data);
10754
10755 SETUP_WITH_FEATURES(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm);
10756 START();
10757
10758 __ Mov(x20, data_base);
10759 __ Mov(x21, data_base + 2 * sizeof(data[0]));
10760
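  // These loads check sign extension into both W and X destinations; the last
  // three read the low byte, halfword and word of data[1], which sign-extend
  // to positive values.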
10761 __ Ldapursb(w0, MemOperand(x20));
10762 __ Ldapursb(x1, MemOperand(x20));
10763 __ Ldapursh(w2, MemOperand(x20));
10764 __ Ldapursh(x3, MemOperand(x20));
10765 __ Ldapursw(x4, MemOperand(x20));
10766 __ Ldapursb(w5, MemOperand(x20, 12));
10767 __ Ldapursb(x6, MemOperand(x20, 12));
10768 __ Ldapursh(w7, MemOperand(x20, 13));
10769 __ Ldapursh(x8, MemOperand(x20, 13));
10770 __ Ldapursw(x9, MemOperand(x20, 10));
10771 __ Ldapursb(w10, MemOperand(x21, -1));
10772 __ Ldapursb(x11, MemOperand(x21, -1));
10773 __ Ldapursh(w12, MemOperand(x21, -4));
10774 __ Ldapursh(x13, MemOperand(x21, -4));
10775 __ Ldapursw(x14, MemOperand(x21, -5));
10776
10777 __ Ldapursb(x15, MemOperand(x20, 8));
10778 __ Ldapursh(x16, MemOperand(x20, 8));
10779 __ Ldapursw(x17, MemOperand(x20, 8));
10780
10781 END();
10782
10783 if (CAN_RUN()) {
10784 RUN();
10785 ASSERT_EQUAL_64(0xffffffef, x0);
10786 ASSERT_EQUAL_64(0xffffffffffffffef, x1);
10787 ASSERT_EQUAL_64(0xffffcdef, x2);
10788 ASSERT_EQUAL_64(0xffffffffffffcdef, x3);
10789 ASSERT_EQUAL_64(0xffffffff89abcdef, x4);
10790 ASSERT_EQUAL_64(0xffffff98, x5);
10791 ASSERT_EQUAL_64(0xffffffffffffff98, x6);
10792 ASSERT_EQUAL_64(0xffffdcba, x7);
10793 ASSERT_EQUAL_64(0xffffffffffffdcba, x8);
10794 ASSERT_EQUAL_64(0xffffffffba987654, x9);
10795 ASSERT_EQUAL_64(0xfffffffe, x10);
10796 ASSERT_EQUAL_64(0xfffffffffffffffe, x11);
10797 ASSERT_EQUAL_64(0xffffba98, x12);
10798 ASSERT_EQUAL_64(0xffffffffffffba98, x13);
10799 ASSERT_EQUAL_64(0xffffffffdcba9876, x14);
10800
10801 ASSERT_EQUAL_64(0x0000000000000010, x15);
10802 ASSERT_EQUAL_64(0x0000000000003210, x16);
10803 ASSERT_EQUAL_64(0x0000000076543210, x17);
10804 }
10805 }
10806
10807
10808 TEST(stlurb_stlurh_strlur) {
10809 uint64_t data[] __attribute__((aligned(kXRegSizeInBytes * 2))) = {0x0, 0x0};
10810
10811 uintptr_t data_base = reinterpret_cast<uintptr_t>(data);
10812
10813 SETUP_WITH_FEATURES(CPUFeatures::kRCpc, CPUFeatures::kRCpcImm);
10814 START();
10815
10816 __ Mov(x0, 0x0011223344556677);
10817 __ Mov(x20, data_base);
10818 __ Mov(x21, data_base + 2 * sizeof(data[0]));
10819
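  // The stores below deliberately overlap within data[0]: a byte at offset 0,
  // a halfword at offset 1 and a word at offset 3, while the final doubleword
  // store fills data[1]. The non-zero offsets exercise the unscaled
  // STLURB/STLURH/STLUR encodings.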
10820 __ Stlrb(w0, MemOperand(x20));
10821 __ Stlrh(w0, MemOperand(x20, 1));
10822 __ Stlr(w0, MemOperand(x20, 3));
10823 __ Stlr(x0, MemOperand(x21, -8));
10824
10825 END();
10826
10827 if (CAN_RUN()) {
10828 RUN();
10829 ASSERT_EQUAL_64(0x0044556677667777, data[0]);
10830 ASSERT_EQUAL_64(0x0011223344556677, data[1]);
10831 }
10832 }
10833
10834
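// Expand V over each simple atomic operation, and over each memory-ordering
// suffix supported by the store forms (none, release) and by the load forms
// (additionally acquire and acquire-release).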
10835 #define SIMPLE_ATOMIC_OPS(V, DEF) \
10836 V(DEF, add) \
10837 V(DEF, clr) \
10838 V(DEF, eor) \
10839 V(DEF, set) \
10840 V(DEF, smax) \
10841 V(DEF, smin) \
10842 V(DEF, umax) \
10843 V(DEF, umin)
10844
10845 #define SIMPLE_ATOMIC_STORE_MODES(V, NAME) \
10846 V(NAME) \
10847 V(NAME##l)
10848
10849 #define SIMPLE_ATOMIC_LOAD_MODES(V, NAME) \
10850 SIMPLE_ATOMIC_STORE_MODES(V, NAME) \
10851 V(NAME##a) \
10852 V(NAME##al)
10853
10854
10855 TEST(unaligned_single_copy_atomicity) {
10856 uint64_t data0[] = {0x1010101010101010, 0x1010101010101010};
10857 uint64_t dst[] = {0x0000000000000000, 0x0000000000000000};
10858
10859 uint64_t* data0_aligned = AlignUp(data0, kAtomicAccessGranule);
10860 uint64_t* dst_aligned = AlignUp(dst, kAtomicAccessGranule);
10861
10862 CPUFeatures features(CPUFeatures::kAtomics,
10863 CPUFeatures::kLORegions,
10864 CPUFeatures::kRCpc,
10865 CPUFeatures::kRCpcImm);
10866 features.Combine(CPUFeatures::kUSCAT);
10867 SETUP_WITH_FEATURES(features);
10868 START();
10869
10870 __ Mov(x0, 0x0123456789abcdef);
10871 __ Mov(x1, 0x456789abcdef0123);
10872 __ Mov(x2, 0x89abcdef01234567);
10873 __ Mov(x3, 0xcdef0123456789ab);
10874 __ Mov(x18, reinterpret_cast<uintptr_t>(data0_aligned));
10875 __ Mov(x19, reinterpret_cast<uintptr_t>(dst_aligned));
10876 __ Mov(x20, x18);
10877 __ Mov(x21, x19);
10878
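  // Step the base pointers through every byte offset within the atomic access
  // granule. Accesses of a given size are only emitted while they still fit
  // inside the granule, so none of them should raise an alignment exception.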
10879 for (unsigned i = 0; i < kAtomicAccessGranule; i++) {
10880 __ Stxrb(w0, w1, MemOperand(x20));
10881 __ Stlxrb(w0, w1, MemOperand(x20));
10882 __ Ldxrb(w0, MemOperand(x20));
10883 __ Ldaxrb(w0, MemOperand(x20));
10884 __ Stllrb(w0, MemOperand(x20));
10885 __ Stlrb(w0, MemOperand(x20));
10886 __ Casb(w0, w1, MemOperand(x20));
10887 __ Caslb(w0, w1, MemOperand(x20));
10888 __ Ldlarb(w0, MemOperand(x20));
10889 __ Ldarb(w0, MemOperand(x20));
10890 __ Casab(w0, w1, MemOperand(x20));
10891 __ Casalb(w0, w1, MemOperand(x20));
10892
10893 __ Swpb(w0, w1, MemOperand(x20));
10894 __ Swplb(w0, w1, MemOperand(x20));
10895 __ Swpab(w0, w1, MemOperand(x20));
10896 __ Swpalb(w0, w1, MemOperand(x20));
10897 __ Ldaprb(w0, MemOperand(x20));
10898 // Use offset instead of Add to test Stlurb and Ldapurb.
10899 __ Stlrb(w0, MemOperand(x19, i));
10900 __ Ldaprb(w0, MemOperand(x19, i));
10901 __ Ldapursb(w0, MemOperand(x20));
10902 __ Ldapursb(x0, MemOperand(x20));
10903
10904 #define ATOMIC_LOAD_B(NAME) __ Ld##NAME##b(w0, w1, MemOperand(x20));
10905 #define ATOMIC_STORE_B(NAME) __ St##NAME##b(w0, MemOperand(x20));
10906 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_B)
10907 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_B)
10908 #undef ATOMIC_LOAD_B
10909 #undef ATOMIC_STORE_B
10910
10911 if (i <= (kAtomicAccessGranule - kHRegSizeInBytes)) {
10912 __ Stxrh(w0, w1, MemOperand(x20));
10913 __ Stlxrh(w0, w1, MemOperand(x20));
10914 __ Ldxrh(w0, MemOperand(x20));
10915 __ Ldaxrh(w0, MemOperand(x20));
10916 __ Stllrh(w0, MemOperand(x20));
10917 __ Stlrh(w0, MemOperand(x20));
10918 __ Cash(w0, w1, MemOperand(x20));
10919 __ Caslh(w0, w1, MemOperand(x20));
10920 __ Ldlarh(w0, MemOperand(x20));
10921 __ Ldarh(w0, MemOperand(x20));
10922 __ Casah(w0, w1, MemOperand(x20));
10923 __ Casalh(w0, w1, MemOperand(x20));
10924
10925 __ Swph(w0, w1, MemOperand(x20));
10926 __ Swplh(w0, w1, MemOperand(x20));
10927 __ Swpah(w0, w1, MemOperand(x20));
10928 __ Swpalh(w0, w1, MemOperand(x20));
10929 __ Ldaprh(w0, MemOperand(x20));
10930 // Use offset instead of Add to test Stlurh and Ldapurh.
10931 __ Stlrh(w0, MemOperand(x19, i));
10932 __ Ldaprh(w0, MemOperand(x19, i));
10933 __ Ldapursh(w0, MemOperand(x20));
10934 __ Ldapursh(x0, MemOperand(x20));
10935
10936 #define ATOMIC_LOAD_H(NAME) __ Ld##NAME##h(w0, w1, MemOperand(x20));
10937 #define ATOMIC_STORE_H(NAME) __ St##NAME##h(w0, MemOperand(x20));
10938 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_H)
10939 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_H)
10940 #undef ATOMIC_LOAD_H
10941 #undef ATOMIC_STORE_H
10942 }
10943
10944 if (i <= (kAtomicAccessGranule - kWRegSizeInBytes)) {
10945 __ Stxr(w0, w1, MemOperand(x20));
10946 __ Stlxr(w0, w1, MemOperand(x20));
10947 __ Ldxr(w0, MemOperand(x20));
10948 __ Ldaxr(w0, MemOperand(x20));
10949 __ Stllr(w0, MemOperand(x20));
10950 __ Stlr(w0, MemOperand(x20));
10951 __ Cas(w0, w1, MemOperand(x20));
10952 __ Casl(w0, w1, MemOperand(x20));
10953 __ Ldlar(w0, MemOperand(x20));
10954 __ Ldar(w0, MemOperand(x20));
10955 __ Casa(w0, w1, MemOperand(x20));
10956 __ Casal(w0, w1, MemOperand(x20));
10957
10958 __ Swp(w0, w1, MemOperand(x20));
10959 __ Swpl(w0, w1, MemOperand(x20));
10960 __ Swpa(w0, w1, MemOperand(x20));
10961 __ Swpal(w0, w1, MemOperand(x20));
10962 __ Ldapr(w0, MemOperand(x20));
10963 // Use offset instead of Add to test Stlur and Ldapur.
10964 __ Stlr(w0, MemOperand(x19, i));
10965 __ Ldapr(w0, MemOperand(x19, i));
10966 __ Ldapursw(x0, MemOperand(x20));
10967
10968 #define ATOMIC_LOAD_W(NAME) __ Ld##NAME(w0, w1, MemOperand(x20));
10969 #define ATOMIC_STORE_W(NAME) __ St##NAME(w0, MemOperand(x20));
10970 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_W)
10971 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_W)
10972 #undef ATOMIC_LOAD_W
10973 #undef ATOMIC_STORE_W
10974 }
10975
10976 if (i <= (kAtomicAccessGranule - (kWRegSizeInBytes * 2))) {
10977 __ Casp(w0, w1, w2, w3, MemOperand(x20));
10978 __ Caspl(w0, w1, w2, w3, MemOperand(x20));
10979 __ Caspa(w0, w1, w2, w3, MemOperand(x20));
10980 __ Caspal(w0, w1, w2, w3, MemOperand(x20));
10981 __ Stxp(w0, w1, w2, MemOperand(x20));
10982 __ Stlxp(w0, w1, w2, MemOperand(x20));
10983 __ Ldxp(w0, w1, MemOperand(x20));
10984 __ Ldaxp(w0, w1, MemOperand(x20));
10985 }
10986
10987 if (i <= (kAtomicAccessGranule - kXRegSizeInBytes)) {
10988 __ Stxr(x0, x1, MemOperand(x20));
10989 __ Stlxr(x0, x1, MemOperand(x20));
10990 __ Ldxr(x0, MemOperand(x20));
10991 __ Ldaxr(x0, MemOperand(x20));
10992 __ Stllr(x0, MemOperand(x20));
10993 __ Stlr(x0, MemOperand(x20));
10994 __ Cas(x0, x1, MemOperand(x20));
10995 __ Casl(x0, x1, MemOperand(x20));
10996 __ Ldlar(x0, MemOperand(x20));
10997 __ Ldar(x0, MemOperand(x20));
10998 __ Casa(x0, x1, MemOperand(x20));
10999 __ Casal(x0, x1, MemOperand(x20));
11000
11001 __ Swp(x0, x1, MemOperand(x20));
11002 __ Swpl(x0, x1, MemOperand(x20));
11003 __ Swpa(x0, x1, MemOperand(x20));
11004 __ Swpal(x0, x1, MemOperand(x20));
11005 __ Ldapr(x0, MemOperand(x20));
11006 // Use offset instead of Add to test Stlur and Ldapur.
11007 __ Stlr(x0, MemOperand(x19, i));
11008 __ Ldapr(x0, MemOperand(x19, i));
11009
11010 #define ATOMIC_LOAD_X(NAME) __ Ld##NAME(x0, x1, MemOperand(x20));
11011 #define ATOMIC_STORE_X(NAME) __ St##NAME(x0, MemOperand(x20));
11012 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_X)
11013 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_X)
11014 #undef ATOMIC_LOAD_X
11015 #undef ATOMIC_STORE_X
11016 }
11017
11018 if (i <= (kAtomicAccessGranule - (kXRegSizeInBytes * 2))) {
11019 __ Casp(x0, x1, x2, x3, MemOperand(x20));
11020 __ Caspl(x0, x1, x2, x3, MemOperand(x20));
11021 __ Caspa(x0, x1, x2, x3, MemOperand(x20));
11022 __ Caspal(x0, x1, x2, x3, MemOperand(x20));
11023 __ Stxp(x0, x1, x2, MemOperand(x20));
11024 __ Stlxp(x0, x1, x2, MemOperand(x20));
11025 __ Ldxp(x0, x1, MemOperand(x20));
11026 __ Ldaxp(x0, x1, MemOperand(x20));
11027 }
11028
11029 __ Add(x20, x20, 1);
11030 __ Add(x21, x21, 1);
11031 }
11032 END();
11033
11034 if (CAN_RUN()) {
11035 // We can't detect kUSCAT with the CPUFeaturesAuditor so it fails the seen
11036 // check.
11037 RUN_WITHOUT_SEEN_FEATURE_CHECK();
11038 }
11039 }
11040
11041
11042 #if defined(VIXL_NEGATIVE_TESTING) && defined(VIXL_INCLUDE_SIMULATOR_AARCH64)
11043
11044 #define CHECK_ALIGN_FAIL(i, expr) \
11045 { \
11046 CPUFeatures features(CPUFeatures::kAtomics, \
11047 CPUFeatures::kLORegions, \
11048 CPUFeatures::kRCpc, \
11049 CPUFeatures::kRCpcImm); \
11050 features.Combine(CPUFeatures::kUSCAT); \
11051 SETUP_WITH_FEATURES(features); \
11052 START(); \
11053 __ Mov(x0, 0x0123456789abcdef); \
11054 __ Mov(x1, 0x456789abcdef0123); \
11055 __ Mov(x2, 0x89abcdef01234567); \
11056 __ Mov(x3, 0xcdef0123456789ab); \
11057 __ Mov(x20, reinterpret_cast<uintptr_t>(data0_aligned)); \
11058 __ Mov(x21, reinterpret_cast<uintptr_t>(dst_aligned)); \
11059 __ Add(x20, x20, i); \
11060 __ Add(x21, x21, i); \
11061 expr; \
11062 END(); \
11063 if (CAN_RUN()) { \
11064 /* We can't detect kUSCAT with the CPUFeaturesAuditor so it fails the */ \
11065 /* seen check. */ \
11066 MUST_FAIL_WITH_MESSAGE(RUN_WITHOUT_SEEN_FEATURE_CHECK(), \
11067 "ALIGNMENT EXCEPTION"); \
11068 } \
11069 }
11070
11071 TEST(unaligned_single_copy_atomicity_negative_test) {
11072 uint64_t data0[] = {0x1010101010101010, 0x1010101010101010};
11073 uint64_t dst[] = {0x0000000000000000, 0x0000000000000000};
11074
11075 uint64_t* data0_aligned = AlignUp(data0, kAtomicAccessGranule);
11076 uint64_t* dst_aligned = AlignUp(dst, kAtomicAccessGranule);
11077
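  // Only emit accesses that extend past the end of the atomic access granule;
  // each of them is expected to raise an alignment exception in the simulator.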
11078 for (unsigned i = 0; i < kAtomicAccessGranule; i++) {
11079 if (i > (kAtomicAccessGranule - kHRegSizeInBytes)) {
11080 CHECK_ALIGN_FAIL(i, __ Stxrh(w0, w1, MemOperand(x20)));
11081 CHECK_ALIGN_FAIL(i, __ Stlxrh(w0, w1, MemOperand(x20)));
11082 CHECK_ALIGN_FAIL(i, __ Ldxrh(w0, MemOperand(x20)));
11083 CHECK_ALIGN_FAIL(i, __ Ldaxrh(w0, MemOperand(x20)));
11084 CHECK_ALIGN_FAIL(i, __ Stllrh(w0, MemOperand(x20)));
11085 CHECK_ALIGN_FAIL(i, __ Stlrh(w0, MemOperand(x20)));
11086 CHECK_ALIGN_FAIL(i, __ Cash(w0, w1, MemOperand(x20)));
11087 CHECK_ALIGN_FAIL(i, __ Caslh(w0, w1, MemOperand(x20)));
11088 CHECK_ALIGN_FAIL(i, __ Ldlarh(w0, MemOperand(x20)));
11089 CHECK_ALIGN_FAIL(i, __ Ldarh(w0, MemOperand(x20)));
11090 CHECK_ALIGN_FAIL(i, __ Casah(w0, w1, MemOperand(x20)));
11091 CHECK_ALIGN_FAIL(i, __ Casalh(w0, w1, MemOperand(x20)));
11092
11093 CHECK_ALIGN_FAIL(i, __ Swph(w0, w1, MemOperand(x20)));
11094 CHECK_ALIGN_FAIL(i, __ Swplh(w0, w1, MemOperand(x20)));
11095 CHECK_ALIGN_FAIL(i, __ Swpah(w0, w1, MemOperand(x20)));
11096 CHECK_ALIGN_FAIL(i, __ Swpalh(w0, w1, MemOperand(x20)));
11097 CHECK_ALIGN_FAIL(i, __ Ldaprh(w0, MemOperand(x20)));
11098 // Use offset instead of Add to test Stlurh and Ldapurh.
11099 CHECK_ALIGN_FAIL(0, __ Stlrh(w0, MemOperand(x20, i)));
11100 CHECK_ALIGN_FAIL(0, __ Ldaprh(w0, MemOperand(x20, i)));
11101 CHECK_ALIGN_FAIL(i, __ Ldapursh(w0, MemOperand(x20)));
11102 CHECK_ALIGN_FAIL(i, __ Ldapursh(x0, MemOperand(x20)));
11103
11104 #define ATOMIC_LOAD_H(NAME) \
11105 CHECK_ALIGN_FAIL(i, __ Ld##NAME##h(w0, w1, MemOperand(x20)));
11106 #define ATOMIC_STORE_H(NAME) \
11107 CHECK_ALIGN_FAIL(i, __ St##NAME##h(w0, MemOperand(x20)));
11108 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_H)
11109 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_H)
11110 #undef ATOMIC_LOAD_H
11111 #undef ATOMIC_STORE_H
11112 }
11113
11114 if (i > (kAtomicAccessGranule - kWRegSizeInBytes)) {
11115 CHECK_ALIGN_FAIL(i, __ Stxr(w0, w1, MemOperand(x20)));
11116 CHECK_ALIGN_FAIL(i, __ Stlxr(w0, w1, MemOperand(x20)));
11117 CHECK_ALIGN_FAIL(i, __ Ldxr(w0, MemOperand(x20)));
11118 CHECK_ALIGN_FAIL(i, __ Ldaxr(w0, MemOperand(x20)));
11119 CHECK_ALIGN_FAIL(i, __ Stllr(w0, MemOperand(x20)));
11120 CHECK_ALIGN_FAIL(i, __ Stlr(w0, MemOperand(x20)));
11121 CHECK_ALIGN_FAIL(i, __ Cas(w0, w1, MemOperand(x20)));
11122 CHECK_ALIGN_FAIL(i, __ Casl(w0, w1, MemOperand(x20)));
11123 CHECK_ALIGN_FAIL(i, __ Ldlar(w0, MemOperand(x20)));
11124 CHECK_ALIGN_FAIL(i, __ Ldar(w0, MemOperand(x20)));
11125 CHECK_ALIGN_FAIL(i, __ Casa(w0, w1, MemOperand(x20)));
11126 CHECK_ALIGN_FAIL(i, __ Casal(w0, w1, MemOperand(x20)));
11127
11128 CHECK_ALIGN_FAIL(i, __ Swp(w0, w1, MemOperand(x20)));
11129 CHECK_ALIGN_FAIL(i, __ Swpl(w0, w1, MemOperand(x20)));
11130 CHECK_ALIGN_FAIL(i, __ Swpa(w0, w1, MemOperand(x20)));
11131 CHECK_ALIGN_FAIL(i, __ Swpal(w0, w1, MemOperand(x20)));
11132 CHECK_ALIGN_FAIL(i, __ Ldapr(w0, MemOperand(x20)));
11133 // Use offset instead of add to test Stlur and Ldapur.
11134 CHECK_ALIGN_FAIL(0, __ Stlr(w0, MemOperand(x20, i)));
11135 CHECK_ALIGN_FAIL(0, __ Ldapr(w0, MemOperand(x20, i)));
11136 CHECK_ALIGN_FAIL(i, __ Ldapursw(x0, MemOperand(x20)));
11137
11138 #define ATOMIC_LOAD_W(NAME) \
11139 CHECK_ALIGN_FAIL(i, __ Ld##NAME(w0, w1, MemOperand(x20)));
11140 #define ATOMIC_STORE_W(NAME) \
11141 CHECK_ALIGN_FAIL(i, __ St##NAME(w0, MemOperand(x20)));
11142 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_W)
11143 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_W)
11144 #undef ATOMIC_LOAD_W
11145 #undef ATOMIC_STORE_W
11146 }
11147
11148 if (i > (kAtomicAccessGranule - (kWRegSizeInBytes * 2))) {
11149 CHECK_ALIGN_FAIL(i, __ Casp(w0, w1, w2, w3, MemOperand(x20)));
11150 CHECK_ALIGN_FAIL(i, __ Caspl(w0, w1, w2, w3, MemOperand(x20)));
11151 CHECK_ALIGN_FAIL(i, __ Caspa(w0, w1, w2, w3, MemOperand(x20)));
11152 CHECK_ALIGN_FAIL(i, __ Caspal(w0, w1, w2, w3, MemOperand(x20)));
11153 CHECK_ALIGN_FAIL(i, __ Stxp(w0, w1, w2, MemOperand(x20)));
11154 CHECK_ALIGN_FAIL(i, __ Stlxp(w0, w1, w2, MemOperand(x20)));
11155 CHECK_ALIGN_FAIL(i, __ Ldxp(w0, w1, MemOperand(x20)));
11156 CHECK_ALIGN_FAIL(i, __ Ldaxp(w0, w1, MemOperand(x20)));
11157 }
11158
11159 if (i > (kAtomicAccessGranule - kXRegSizeInBytes)) {
11160 CHECK_ALIGN_FAIL(i, __ Stxr(x0, x1, MemOperand(x20)));
11161 CHECK_ALIGN_FAIL(i, __ Stlxr(x0, x1, MemOperand(x20)));
11162 CHECK_ALIGN_FAIL(i, __ Ldxr(x0, MemOperand(x20)));
11163 CHECK_ALIGN_FAIL(i, __ Ldaxr(x0, MemOperand(x20)));
11164 CHECK_ALIGN_FAIL(i, __ Stllr(x0, MemOperand(x20)));
11165 CHECK_ALIGN_FAIL(i, __ Stlr(x0, MemOperand(x20)));
11166 CHECK_ALIGN_FAIL(i, __ Cas(x0, x1, MemOperand(x20)));
11167 CHECK_ALIGN_FAIL(i, __ Casl(x0, x1, MemOperand(x20)));
11168 CHECK_ALIGN_FAIL(i, __ Ldlar(x0, MemOperand(x20)));
11169 CHECK_ALIGN_FAIL(i, __ Ldar(x0, MemOperand(x20)));
11170 CHECK_ALIGN_FAIL(i, __ Casa(x0, x1, MemOperand(x20)));
11171 CHECK_ALIGN_FAIL(i, __ Casal(x0, x1, MemOperand(x20)));
11172
11173 CHECK_ALIGN_FAIL(i, __ Swp(x0, x1, MemOperand(x20)));
11174 CHECK_ALIGN_FAIL(i, __ Swpl(x0, x1, MemOperand(x20)));
11175 CHECK_ALIGN_FAIL(i, __ Swpa(x0, x1, MemOperand(x20)));
11176 CHECK_ALIGN_FAIL(i, __ Swpal(x0, x1, MemOperand(x20)));
11177 CHECK_ALIGN_FAIL(i, __ Ldapr(x0, MemOperand(x20)));
11178 // Use offset instead of add to test Stlur and Ldapur.
11179 CHECK_ALIGN_FAIL(0, __ Stlr(x0, MemOperand(x20, i)));
11180 CHECK_ALIGN_FAIL(0, __ Ldapr(x0, MemOperand(x20, i)));
11181
11182 #define ATOMIC_LOAD_X(NAME) \
11183 CHECK_ALIGN_FAIL(i, __ Ld##NAME(x0, x1, MemOperand(x20)));
11184 #define ATOMIC_STORE_X(NAME) \
11185 CHECK_ALIGN_FAIL(i, __ St##NAME(x0, MemOperand(x20)));
11186 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_LOAD_MODES, ATOMIC_LOAD_X)
11187 SIMPLE_ATOMIC_OPS(SIMPLE_ATOMIC_STORE_MODES, ATOMIC_STORE_X)
11188 #undef ATOMIC_LOAD_X
11189 #undef ATOMIC_STORE_X
11190 }
11191
11192 if (i > (kAtomicAccessGranule - (kXRegSizeInBytes * 2))) {
11193 CHECK_ALIGN_FAIL(i, __ Casp(x0, x1, x2, x3, MemOperand(x20)));
11194 CHECK_ALIGN_FAIL(i, __ Caspl(x0, x1, x2, x3, MemOperand(x20)));
11195 CHECK_ALIGN_FAIL(i, __ Caspa(x0, x1, x2, x3, MemOperand(x20)));
11196 CHECK_ALIGN_FAIL(i, __ Caspal(x0, x1, x2, x3, MemOperand(x20)));
11197 CHECK_ALIGN_FAIL(i, __ Stxp(x0, x1, x2, MemOperand(x20)));
11198 CHECK_ALIGN_FAIL(i, __ Stlxp(x0, x1, x2, MemOperand(x20)));
11199 CHECK_ALIGN_FAIL(i, __ Ldxp(x0, x1, MemOperand(x20)));
11200 CHECK_ALIGN_FAIL(i, __ Ldaxp(x0, x1, MemOperand(x20)));
11201 }
11202 }
11203 }
11204
11205 TEST(unaligned_single_copy_atomicity_negative_test_2) {
11206 uint64_t data[] = {0x1010101010101010, 0x1010101010101010};
11207
11208 uint64_t* data_aligned = AlignUp(data, kAtomicAccessGranule);
11209
11210   // Check that the same code runs successfully when USCAT is enabled, but
11211   // fails when it is not.
11212 {
11213 SETUP_WITH_FEATURES(CPUFeatures::kUSCAT);
11214 START();
11215 __ Mov(x0, reinterpret_cast<uintptr_t>(data_aligned));
11216 __ Add(x0, x0, 1);
11217 __ Ldxrh(w1, MemOperand(x0));
11218 END();
11219 if (CAN_RUN()) {
11220 RUN_WITHOUT_SEEN_FEATURE_CHECK();
11221 }
11222 }
11223 {
11224 SETUP();
11225 START();
11226 __ Mov(x0, reinterpret_cast<uintptr_t>(data_aligned));
11227 __ Add(x0, x0, 1);
11228 __ Ldxrh(w1, MemOperand(x0));
11229 END();
11230 if (CAN_RUN()) {
11231 MUST_FAIL_WITH_MESSAGE(RUN(), "ALIGNMENT EXCEPTION");
11232 }
11233 }
11234 }
11235 #endif // VIXL_NEGATIVE_TESTING && VIXL_INCLUDE_SIMULATOR_AARCH64
11236
11237
11238 TEST(load_store_tagged_immediate_offset) {
11239 uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
11240 int tag_count = sizeof(tags) / sizeof(tags[0]);
11241
11242 const int kMaxDataLength = 160;
11243
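  // For every combination of source and destination pointer tag, copy a block
  // from src to dst using each addressing form in turn, then check that the
  // tags were preserved and the data was copied correctly.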
11244 for (int i = 0; i < tag_count; i++) {
11245 unsigned char src[kMaxDataLength];
11246 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
11247 uint64_t src_tag = tags[i];
11248 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
11249
11250 for (int k = 0; k < kMaxDataLength; k++) {
11251 src[k] = k + 1;
11252 }
11253
11254 for (int j = 0; j < tag_count; j++) {
11255 unsigned char dst[kMaxDataLength];
11256 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
11257 uint64_t dst_tag = tags[j];
11258 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
11259
11260 memset(dst, 0, kMaxDataLength);
11261
11262 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
11263 START();
11264
11265 __ Mov(x0, src_tagged);
11266 __ Mov(x1, dst_tagged);
11267
11268 int offset = 0;
11269
11270 // Scaled-immediate offsets.
11271 {
11272 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11273 __ ldp(q0, q1, MemOperand(x0, offset));
11274 __ stp(q0, q1, MemOperand(x1, offset));
11275 }
11276 offset += 2 * kQRegSizeInBytes;
11277
11278 {
11279 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11280 __ ldp(x2, x3, MemOperand(x0, offset));
11281 __ stp(x2, x3, MemOperand(x1, offset));
11282 }
11283 offset += 2 * kXRegSizeInBytes;
11284
11285 {
11286 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11287 __ ldpsw(x2, x3, MemOperand(x0, offset));
11288 __ stp(w2, w3, MemOperand(x1, offset));
11289 }
11290 offset += 2 * kWRegSizeInBytes;
11291
11292 {
11293 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11294 __ ldp(d0, d1, MemOperand(x0, offset));
11295 __ stp(d0, d1, MemOperand(x1, offset));
11296 }
11297 offset += 2 * kDRegSizeInBytes;
11298
11299 {
11300 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11301 __ ldp(w2, w3, MemOperand(x0, offset));
11302 __ stp(w2, w3, MemOperand(x1, offset));
11303 }
11304 offset += 2 * kWRegSizeInBytes;
11305
11306 {
11307 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11308 __ ldp(s0, s1, MemOperand(x0, offset));
11309 __ stp(s0, s1, MemOperand(x1, offset));
11310 }
11311 offset += 2 * kSRegSizeInBytes;
11312
11313 {
11314 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11315 __ ldr(x2, MemOperand(x0, offset), RequireScaledOffset);
11316 __ str(x2, MemOperand(x1, offset), RequireScaledOffset);
11317 }
11318 offset += kXRegSizeInBytes;
11319
11320 {
11321 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11322 __ ldr(d0, MemOperand(x0, offset), RequireScaledOffset);
11323 __ str(d0, MemOperand(x1, offset), RequireScaledOffset);
11324 }
11325 offset += kDRegSizeInBytes;
11326
11327 {
11328 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11329 __ ldr(w2, MemOperand(x0, offset), RequireScaledOffset);
11330 __ str(w2, MemOperand(x1, offset), RequireScaledOffset);
11331 }
11332 offset += kWRegSizeInBytes;
11333
11334 {
11335 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11336 __ ldr(s0, MemOperand(x0, offset), RequireScaledOffset);
11337 __ str(s0, MemOperand(x1, offset), RequireScaledOffset);
11338 }
11339 offset += kSRegSizeInBytes;
11340
11341 {
11342 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11343 __ ldrh(w2, MemOperand(x0, offset), RequireScaledOffset);
11344 __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
11345 }
11346 offset += 2;
11347
11348 {
11349 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11350 __ ldrsh(w2, MemOperand(x0, offset), RequireScaledOffset);
11351 __ strh(w2, MemOperand(x1, offset), RequireScaledOffset);
11352 }
11353 offset += 2;
11354
11355 {
11356 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11357 __ ldrb(w2, MemOperand(x0, offset), RequireScaledOffset);
11358 __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
11359 }
11360 offset += 1;
11361
11362 {
11363 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11364 __ ldrsb(w2, MemOperand(x0, offset), RequireScaledOffset);
11365 __ strb(w2, MemOperand(x1, offset), RequireScaledOffset);
11366 }
11367 offset += 1;
11368
11369 // Unscaled-immediate offsets.
11370
11371 {
11372 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11373 __ ldur(x2, MemOperand(x0, offset), RequireUnscaledOffset);
11374 __ stur(x2, MemOperand(x1, offset), RequireUnscaledOffset);
11375 }
11376 offset += kXRegSizeInBytes;
11377
11378 {
11379 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11380 __ ldur(d0, MemOperand(x0, offset), RequireUnscaledOffset);
11381 __ stur(d0, MemOperand(x1, offset), RequireUnscaledOffset);
11382 }
11383 offset += kDRegSizeInBytes;
11384
11385 {
11386 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11387 __ ldur(w2, MemOperand(x0, offset), RequireUnscaledOffset);
11388 __ stur(w2, MemOperand(x1, offset), RequireUnscaledOffset);
11389 }
11390 offset += kWRegSizeInBytes;
11391
11392 {
11393 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11394 __ ldur(s0, MemOperand(x0, offset), RequireUnscaledOffset);
11395 __ stur(s0, MemOperand(x1, offset), RequireUnscaledOffset);
11396 }
11397 offset += kSRegSizeInBytes;
11398
11399 {
11400 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11401 __ ldurh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
11402 __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
11403 }
11404 offset += 2;
11405
11406 {
11407 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11408 __ ldursh(w2, MemOperand(x0, offset), RequireUnscaledOffset);
11409 __ sturh(w2, MemOperand(x1, offset), RequireUnscaledOffset);
11410 }
11411 offset += 2;
11412
11413 {
11414 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11415 __ ldurb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
11416 __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
11417 }
11418 offset += 1;
11419
11420 {
11421 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11422 __ ldursb(w2, MemOperand(x0, offset), RequireUnscaledOffset);
11423 __ sturb(w2, MemOperand(x1, offset), RequireUnscaledOffset);
11424 }
11425 offset += 1;
11426
11427 // Extract the tag (so we can test that it was preserved correctly).
11428 __ Ubfx(x0, x0, kAddressTagOffset, kAddressTagWidth);
11429 __ Ubfx(x1, x1, kAddressTagOffset, kAddressTagWidth);
11430
11431 VIXL_ASSERT(kMaxDataLength >= offset);
11432
11433 END();
11434 if (CAN_RUN()) {
11435 RUN();
11436
11437 ASSERT_EQUAL_64(src_tag, x0);
11438 ASSERT_EQUAL_64(dst_tag, x1);
11439
11440 for (int k = 0; k < offset; k++) {
11441 VIXL_CHECK(src[k] == dst[k]);
11442 }
11443 }
11444 }
11445 }
11446 }
11447
11448
11449 TEST(load_store_tagged_immediate_preindex) {
11450 uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
11451 int tag_count = sizeof(tags) / sizeof(tags[0]);
11452
11453 const int kMaxDataLength = 128;
11454
11455 for (int i = 0; i < tag_count; i++) {
11456 unsigned char src[kMaxDataLength];
11457 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
11458 uint64_t src_tag = tags[i];
11459 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
11460
11461 for (int k = 0; k < kMaxDataLength; k++) {
11462 src[k] = k + 1;
11463 }
11464
11465 for (int j = 0; j < tag_count; j++) {
11466 unsigned char dst[kMaxDataLength];
11467 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
11468 uint64_t dst_tag = tags[j];
11469 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
11470
11471 for (int k = 0; k < kMaxDataLength; k++) {
11472 dst[k] = 0;
11473 }
11474
11475 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
11476 START();
11477
11478 // Each MemOperand must apply a pre-index equal to the size of the
11479 // previous access.
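// As a reminder of the addressing mode (illustrative only, not emitted by
// this test): `ldr x2, [x0, #16]!` adds 16 to x0 first and then loads from
// the updated x0, so each pre-index below has to step over the bytes consumed
// by the access before it.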
11480
11481 // Start with a non-zero preindex.
11482 int preindex = 62 * kXRegSizeInBytes;
11483 int data_length = 0;
11484
11485 __ Mov(x0, src_tagged - preindex);
11486 __ Mov(x1, dst_tagged - preindex);
11487
11488 {
11489 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11490 __ ldp(q0, q1, MemOperand(x0, preindex, PreIndex));
11491 __ stp(q0, q1, MemOperand(x1, preindex, PreIndex));
11492 }
11493 preindex = 2 * kQRegSizeInBytes;
11494 data_length = preindex;
11495
11496 {
11497 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11498 __ ldp(x2, x3, MemOperand(x0, preindex, PreIndex));
11499 __ stp(x2, x3, MemOperand(x1, preindex, PreIndex));
11500 }
11501 preindex = 2 * kXRegSizeInBytes;
11502 data_length += preindex;
11503
11504 {
11505 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11506 __ ldpsw(x2, x3, MemOperand(x0, preindex, PreIndex));
11507 __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
11508 }
11509 preindex = 2 * kWRegSizeInBytes;
11510 data_length += preindex;
11511
11512 {
11513 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11514 __ ldp(d0, d1, MemOperand(x0, preindex, PreIndex));
11515 __ stp(d0, d1, MemOperand(x1, preindex, PreIndex));
11516 }
11517 preindex = 2 * kDRegSizeInBytes;
11518 data_length += preindex;
11519
11520 {
11521 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11522 __ ldp(w2, w3, MemOperand(x0, preindex, PreIndex));
11523 __ stp(w2, w3, MemOperand(x1, preindex, PreIndex));
11524 }
11525 preindex = 2 * kWRegSizeInBytes;
11526 data_length += preindex;
11527
11528 {
11529 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11530 __ ldp(s0, s1, MemOperand(x0, preindex, PreIndex));
11531 __ stp(s0, s1, MemOperand(x1, preindex, PreIndex));
11532 }
11533 preindex = 2 * kSRegSizeInBytes;
11534 data_length += preindex;
11535
11536 {
11537 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11538 __ ldr(x2, MemOperand(x0, preindex, PreIndex));
11539 __ str(x2, MemOperand(x1, preindex, PreIndex));
11540 }
11541 preindex = kXRegSizeInBytes;
11542 data_length += preindex;
11543
11544 {
11545 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11546 __ ldr(d0, MemOperand(x0, preindex, PreIndex));
11547 __ str(d0, MemOperand(x1, preindex, PreIndex));
11548 }
11549 preindex = kDRegSizeInBytes;
11550 data_length += preindex;
11551
11552 {
11553 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11554 __ ldr(w2, MemOperand(x0, preindex, PreIndex));
11555 __ str(w2, MemOperand(x1, preindex, PreIndex));
11556 }
11557 preindex = kWRegSizeInBytes;
11558 data_length += preindex;
11559
11560 {
11561 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11562 __ ldr(s0, MemOperand(x0, preindex, PreIndex));
11563 __ str(s0, MemOperand(x1, preindex, PreIndex));
11564 }
11565 preindex = kSRegSizeInBytes;
11566 data_length += preindex;
11567
11568 {
11569 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11570 __ ldrh(w2, MemOperand(x0, preindex, PreIndex));
11571 __ strh(w2, MemOperand(x1, preindex, PreIndex));
11572 }
11573 preindex = 2;
11574 data_length += preindex;
11575
11576 {
11577 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11578 __ ldrsh(w2, MemOperand(x0, preindex, PreIndex));
11579 __ strh(w2, MemOperand(x1, preindex, PreIndex));
11580 }
11581 preindex = 2;
11582 data_length += preindex;
11583
11584 {
11585 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11586 __ ldrb(w2, MemOperand(x0, preindex, PreIndex));
11587 __ strb(w2, MemOperand(x1, preindex, PreIndex));
11588 }
11589 preindex = 1;
11590 data_length += preindex;
11591
11592 {
11593 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11594 __ ldrsb(w2, MemOperand(x0, preindex, PreIndex));
11595 __ strb(w2, MemOperand(x1, preindex, PreIndex));
11596 }
11597 preindex = 1;
11598 data_length += preindex;
11599
11600 VIXL_ASSERT(kMaxDataLength >= data_length);
11601
11602 END();
11603 if (CAN_RUN()) {
11604 RUN();
11605
11606 // Check that the preindex was correctly applied in each operation, and
11607 // that the tag was preserved.
11608 ASSERT_EQUAL_64(src_tagged + data_length - preindex, x0);
11609 ASSERT_EQUAL_64(dst_tagged + data_length - preindex, x1);
11610
11611 for (int k = 0; k < data_length; k++) {
11612 VIXL_CHECK(src[k] == dst[k]);
11613 }
11614 }
11615 }
11616 }
11617 }
11618
11619
11620 TEST(load_store_tagged_immediate_postindex) {
11621 uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
11622 int tag_count = sizeof(tags) / sizeof(tags[0]);
11623
11624 const int kMaxDataLength = 128;
11625
11626 for (int i = 0; i < tag_count; i++) {
11627 unsigned char src[kMaxDataLength];
11628 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
11629 uint64_t src_tag = tags[i];
11630 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
11631
11632 for (int k = 0; k < kMaxDataLength; k++) {
11633 src[k] = k + 1;
11634 }
11635
11636 for (int j = 0; j < tag_count; j++) {
11637 unsigned char dst[kMaxDataLength];
11638 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
11639 uint64_t dst_tag = tags[j];
11640 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
11641
11642 for (int k = 0; k < kMaxDataLength; k++) {
11643 dst[k] = 0;
11644 }
11645
11646 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
11647 START();
11648
11649 int postindex = 2 * kXRegSizeInBytes;
11650 int data_length = 0;
11651
11652 __ Mov(x0, src_tagged);
11653 __ Mov(x1, dst_tagged);
11654
11655 {
11656 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11657 __ ldp(x2, x3, MemOperand(x0, postindex, PostIndex));
11658 __ stp(x2, x3, MemOperand(x1, postindex, PostIndex));
11659 }
11660 data_length = postindex;
11661
11662 postindex = 2 * kQRegSizeInBytes;
11663 {
11664 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11665 __ ldp(q0, q1, MemOperand(x0, postindex, PostIndex));
11666 __ stp(q0, q1, MemOperand(x1, postindex, PostIndex));
11667 }
11668 data_length += postindex;
11669
11670 postindex = 2 * kWRegSizeInBytes;
11671 {
11672 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11673 __ ldpsw(x2, x3, MemOperand(x0, postindex, PostIndex));
11674 __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
11675 }
11676 data_length += postindex;
11677
11678 postindex = 2 * kDRegSizeInBytes;
11679 {
11680 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11681 __ ldp(d0, d1, MemOperand(x0, postindex, PostIndex));
11682 __ stp(d0, d1, MemOperand(x1, postindex, PostIndex));
11683 }
11684 data_length += postindex;
11685
11686 postindex = 2 * kWRegSizeInBytes;
11687 {
11688 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11689 __ ldp(w2, w3, MemOperand(x0, postindex, PostIndex));
11690 __ stp(w2, w3, MemOperand(x1, postindex, PostIndex));
11691 }
11692 data_length += postindex;
11693
11694 postindex = 2 * kSRegSizeInBytes;
11695 {
11696 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11697 __ ldp(s0, s1, MemOperand(x0, postindex, PostIndex));
11698 __ stp(s0, s1, MemOperand(x1, postindex, PostIndex));
11699 }
11700 data_length += postindex;
11701
11702 postindex = kXRegSizeInBytes;
11703 {
11704 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11705 __ ldr(x2, MemOperand(x0, postindex, PostIndex));
11706 __ str(x2, MemOperand(x1, postindex, PostIndex));
11707 }
11708 data_length += postindex;
11709
11710 postindex = kDRegSizeInBytes;
11711 {
11712 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11713 __ ldr(d0, MemOperand(x0, postindex, PostIndex));
11714 __ str(d0, MemOperand(x1, postindex, PostIndex));
11715 }
11716 data_length += postindex;
11717
11718 postindex = kWRegSizeInBytes;
11719 {
11720 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11721 __ ldr(w2, MemOperand(x0, postindex, PostIndex));
11722 __ str(w2, MemOperand(x1, postindex, PostIndex));
11723 }
11724 data_length += postindex;
11725
11726 postindex = kSRegSizeInBytes;
11727 {
11728 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11729 __ ldr(s0, MemOperand(x0, postindex, PostIndex));
11730 __ str(s0, MemOperand(x1, postindex, PostIndex));
11731 }
11732 data_length += postindex;
11733
11734 postindex = 2;
11735 {
11736 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11737 __ ldrh(w2, MemOperand(x0, postindex, PostIndex));
11738 __ strh(w2, MemOperand(x1, postindex, PostIndex));
11739 }
11740 data_length += postindex;
11741
11742 postindex = 2;
11743 {
11744 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11745 __ ldrsh(w2, MemOperand(x0, postindex, PostIndex));
11746 __ strh(w2, MemOperand(x1, postindex, PostIndex));
11747 }
11748 data_length += postindex;
11749
11750 postindex = 1;
11751 {
11752 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11753 __ ldrb(w2, MemOperand(x0, postindex, PostIndex));
11754 __ strb(w2, MemOperand(x1, postindex, PostIndex));
11755 }
11756 data_length += postindex;
11757
11758 postindex = 1;
11759 {
11760 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11761 __ ldrsb(w2, MemOperand(x0, postindex, PostIndex));
11762 __ strb(w2, MemOperand(x1, postindex, PostIndex));
11763 }
11764 data_length += postindex;
11765
11766 VIXL_ASSERT(kMaxDataLength >= data_length);
11767
11768 END();
11769 if (CAN_RUN()) {
11770 RUN();
11771
11772 // Check that the postindex was correctly applied in each operation, and
11773 // that the tag was preserved.
11774 ASSERT_EQUAL_64(src_tagged + data_length, x0);
11775 ASSERT_EQUAL_64(dst_tagged + data_length, x1);
11776
11777 for (int k = 0; k < data_length; k++) {
11778 VIXL_CHECK(src[k] == dst[k]);
11779 }
11780 }
11781 }
11782 }
11783 }
11784
11785
11786 TEST(load_store_tagged_register_offset) {
11787 uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
11788 int tag_count = sizeof(tags) / sizeof(tags[0]);
11789
11790 const int kMaxDataLength = 128;
11791
11792 for (int i = 0; i < tag_count; i++) {
11793 unsigned char src[kMaxDataLength];
11794 uint64_t src_raw = reinterpret_cast<uint64_t>(src);
11795 uint64_t src_tag = tags[i];
11796 uint64_t src_tagged = CPU::SetPointerTag(src_raw, src_tag);
11797
11798 for (int k = 0; k < kMaxDataLength; k++) {
11799 src[k] = k + 1;
11800 }
11801
11802 for (int j = 0; j < tag_count; j++) {
11803 unsigned char dst[kMaxDataLength];
11804 uint64_t dst_raw = reinterpret_cast<uint64_t>(dst);
11805 uint64_t dst_tag = tags[j];
11806 uint64_t dst_tagged = CPU::SetPointerTag(dst_raw, dst_tag);
11807
11808 // Also tag the offset register; the operation should still succeed.
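// Assuming the usual top-byte-ignore behaviour these tests rely on: only the
// top byte of the final virtual address is ignored, so a tag in bits [63:56]
// of the base or the offset register may scramble the top byte of the sum but
// must not change which bytes are accessed.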
11809 for (int o = 0; o < tag_count; o++) {
11810 uint64_t offset_base = CPU::SetPointerTag(UINT64_C(0), tags[o]);
11811 int data_length = 0;
11812
11813 for (int k = 0; k < kMaxDataLength; k++) {
11814 dst[k] = 0;
11815 }
11816
11817 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
11818 START();
11819
11820 __ Mov(x0, src_tagged);
11821 __ Mov(x1, dst_tagged);
11822
11823 __ Mov(x10, offset_base + data_length);
11824 {
11825 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11826 __ ldr(x2, MemOperand(x0, x10));
11827 __ str(x2, MemOperand(x1, x10));
11828 }
11829 data_length += kXRegSizeInBytes;
11830
11831 __ Mov(x10, offset_base + data_length);
11832 {
11833 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11834 __ ldr(d0, MemOperand(x0, x10));
11835 __ str(d0, MemOperand(x1, x10));
11836 }
11837 data_length += kDRegSizeInBytes;
11838
11839 __ Mov(x10, offset_base + data_length);
11840 {
11841 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11842 __ ldr(w2, MemOperand(x0, x10));
11843 __ str(w2, MemOperand(x1, x10));
11844 }
11845 data_length += kWRegSizeInBytes;
11846
11847 __ Mov(x10, offset_base + data_length);
11848 {
11849 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11850 __ ldr(s0, MemOperand(x0, x10));
11851 __ str(s0, MemOperand(x1, x10));
11852 }
11853 data_length += kSRegSizeInBytes;
11854
11855 __ Mov(x10, offset_base + data_length);
11856 {
11857 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11858 __ ldrh(w2, MemOperand(x0, x10));
11859 __ strh(w2, MemOperand(x1, x10));
11860 }
11861 data_length += 2;
11862
11863 __ Mov(x10, offset_base + data_length);
11864 {
11865 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11866 __ ldrsh(w2, MemOperand(x0, x10));
11867 __ strh(w2, MemOperand(x1, x10));
11868 }
11869 data_length += 2;
11870
11871 __ Mov(x10, offset_base + data_length);
11872 {
11873 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11874 __ ldrb(w2, MemOperand(x0, x10));
11875 __ strb(w2, MemOperand(x1, x10));
11876 }
11877 data_length += 1;
11878
11879 __ Mov(x10, offset_base + data_length);
11880 {
11881 ExactAssemblyScope scope(&masm, 2 * kInstructionSize);
11882 __ ldrsb(w2, MemOperand(x0, x10));
11883 __ strb(w2, MemOperand(x1, x10));
11884 }
11885 data_length += 1;
11886
11887 VIXL_ASSERT(kMaxDataLength >= data_length);
11888
11889 END();
11890 if (CAN_RUN()) {
11891 RUN();
11892
11893 // Check that the base and offset registers (and their tags) were left
11894 // unchanged by the register-offset accesses.
11895 ASSERT_EQUAL_64(src_tagged, x0);
11896 ASSERT_EQUAL_64(dst_tagged, x1);
11897 ASSERT_EQUAL_64(offset_base + data_length - 1, x10);
11898
11899 for (int k = 0; k < data_length; k++) {
11900 VIXL_CHECK(src[k] == dst[k]);
11901 }
11902 }
11903 }
11904 }
11905 }
11906 }
11907
11908
11909 TEST(load_store_tagged_register_postindex) {
11910 uint64_t src[] = {0x0706050403020100, 0x0f0e0d0c0b0a0908};
11911 uint64_t tags[] = {0x00, 0x1, 0x55, 0xff};
11912 int tag_count = sizeof(tags) / sizeof(tags[0]);
11913
11914 for (int j = 0; j < tag_count; j++) {
11915 for (int i = 0; i < tag_count; i++) {
11916 SETUP_WITH_FEATURES(CPUFeatures::kNEON);
11917
11918 uint64_t src_base = reinterpret_cast<uint64_t>(src);
11919 uint64_t src_tagged = CPU::SetPointerTag(src_base, tags[i]);
11920 uint64_t offset_tagged = CPU::SetPointerTag(UINT64_C(0), tags[j]);
11921
11922 START();
11923 __ Mov(x10, src_tagged);
11924 __ Mov(x11, offset_tagged);
11925 __ Ld1(v0.V16B(), MemOperand(x10, x11, PostIndex));
11926 // TODO: add other instructions (ld2-4, st1-4) as they become available.
11927 END();
11928
11929 if (CAN_RUN()) {
11930 RUN();
11931
11932 ASSERT_EQUAL_128(0x0f0e0d0c0b0a0908, 0x0706050403020100, q0);
11933 ASSERT_EQUAL_64(src_tagged + offset_tagged, x10);
11934 }
11935 }
11936 }
11937 }
11938
11939
11940 TEST(branch_tagged) {
11941 SETUP();
11942 START();
11943
11944 Label loop, loop_entry, done;
11945 __ Adr(x0, &loop);
11946 __ Mov(x1, 0);
11947 __ B(&loop_entry);
11948
11949 __ Bind(&loop);
11950 __ Add(x1, x1, 1); // Count successful jumps.
11951
11952 // Advance to the next tag, then bail out if we've come back around to tag 0.
11953 __ Add(x0, x0, UINT64_C(1) << kAddressTagOffset);
11954 __ Tst(x0, kAddressTagMask);
11955 __ B(eq, &done);
11956
11957 __ Bind(&loop_entry);
11958 __ Br(x0);
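// The `br x0` above jumps to a tagged code address on every iteration; the
// test therefore expects one successful jump per possible tag value, i.e.
// (1 << kAddressTagWidth) in total, which is what the final assertion checks.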
11959
11960 __ Bind(&done);
11961
11962 END();
11963 if (CAN_RUN()) {
11964 RUN();
11965
11966 ASSERT_EQUAL_64(1 << kAddressTagWidth, x1);
11967 }
11968 }
11969
11970
11971 TEST(branch_and_link_tagged) {
11972 SETUP();
11973 START();
11974
11975 Label loop, loop_entry, done;
11976 __ Adr(x0, &loop);
11977 __ Mov(x1, 0);
11978 __ B(&loop_entry);
11979
11980 __ Bind(&loop);
11981
11982 // Bail out (before counting a successful jump) if lr appears to be tagged.
11983 __ Tst(lr, kAddressTagMask);
11984 __ B(ne, &done);
11985
11986 __ Add(x1, x1, 1); // Count successful jumps.
11987
11988 // Advance to the next tag, then bail out if we've come back around to tag 0.
11989 __ Add(x0, x0, UINT64_C(1) << kAddressTagOffset);
11990 __ Tst(x0, kAddressTagMask);
11991 __ B(eq, &done);
11992
11993 __ Bind(&loop_entry);
11994 __ Blr(x0);
11995
11996 __ Bind(&done);
11997
11998 END();
11999 if (CAN_RUN()) {
12000 RUN();
12001
12002 ASSERT_EQUAL_64(1 << kAddressTagWidth, x1);
12003 }
12004 }
12005
12006
12007 TEST(branch_tagged_and_adr_adrp) {
12008 SETUP_CUSTOM(kPageSize, PageOffsetDependentCode);
12009 START();
12010
12011 Label loop, loop_entry, done;
12012 __ Adr(x0, &loop);
12013 __ Mov(x1, 0);
12014 __ B(&loop_entry);
12015
12016 __ Bind(&loop);
12017
12018 // Bail out (before counting a successful jump) if `adr x10, ...` is tagged.
12019 __ Adr(x10, &done);
12020 __ Tst(x10, kAddressTagMask);
12021 __ B(ne, &done);
12022
12023 // Bail out (before counting a successful jump) if `adrp x11, ...` is tagged.
12024 __ Adrp(x11, &done);
12025 __ Tst(x11, kAddressTagMask);
12026 __ B(ne, &done);
12027
12028 __ Add(x1, x1, 1); // Count successful iterations.
12029
12030 // Advance to the next tag, then bail out if we've come back around to tag 0.
12031 __ Add(x0, x0, UINT64_C(1) << kAddressTagOffset);
12032 __ Tst(x0, kAddressTagMask);
12033 __ B(eq, &done);
12034
12035 __ Bind(&loop_entry);
12036 __ Br(x0);
12037
12038 __ Bind(&done);
12039
12040 END();
12041 if (CAN_RUN()) {
12042 RUN();
12043
12044 ASSERT_EQUAL_64(1 << kAddressTagWidth, x1);
12045 }
12046 }
12047
12048 TEST(system_sys) {
12049 SETUP();
12050 const char* msg = "SYS test!";
12051 uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
12052
12053 START();
12054 __ Mov(x4, msg_addr);
12055 __ Sys(3, 0x7, 0x5, 1, x4);
12056 __ Mov(x3, x4);
12057 __ Sys(3, 0x7, 0xa, 1, x3);
12058 __ Mov(x2, x3);
12059 __ Sys(3, 0x7, 0xb, 1, x2);
12060 __ Mov(x1, x2);
12061 __ Sys(3, 0x7, 0xe, 1, x1);
12062 // TODO: Add tests to check ZVA equivalent.
12063 END();
12064
12065 if (CAN_RUN()) {
12066 RUN();
12067 }
12068 }
12069
12070
12071 TEST(system_ic) {
12072 SETUP();
12073 const char* msg = "IC test!";
12074 uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
12075
12076 START();
12077 __ Mov(x11, msg_addr);
12078 __ Ic(IVAU, x11);
12079 END();
12080
12081 if (CAN_RUN()) {
12082 RUN();
12083 }
12084 }
12085
12086
12087 TEST(system_dc) {
12088 SETUP();
12089 const char* msg = "DC test!";
12090 uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
12091
12092 START();
12093 __ Mov(x20, msg_addr);
12094 __ Dc(CVAC, x20);
12095 __ Mov(x21, msg_addr);
12096 __ Dc(CVAU, x21);
12097 __ Mov(x22, msg_addr);
12098 __ Dc(CIVAC, x22);
12099 // TODO: Add tests to check ZVA.
12100 END();
12101
12102 if (CAN_RUN()) {
12103 RUN();
12104 ASSERT_EQUAL_64(msg_addr, x20);
12105 ASSERT_EQUAL_64(msg_addr, x21);
12106 ASSERT_EQUAL_64(msg_addr, x22);
12107 }
12108 }
12109
12110
12111 TEST(system_dcpop) {
12112 SETUP_WITH_FEATURES(CPUFeatures::kDCPoP);
12113 const char* msg = "DCPoP test!";
12114 uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
12115
12116 START();
12117 __ Mov(x20, msg_addr);
12118 __ Dc(CVAP, x20);
12119 END();
12120
12121 if (CAN_RUN()) {
12122 RUN();
12123 ASSERT_EQUAL_64(msg_addr, x20);
12124 }
12125 }
12126
12127 TEST(system_dccvadp) {
12128 SETUP_WITH_FEATURES(CPUFeatures::kDCCVADP);
12129 const char* msg = "DCCVADP test!";
12130 uintptr_t msg_addr = reinterpret_cast<uintptr_t>(msg);
12131
12132 START();
12133 __ Mov(x20, msg_addr);
12134 __ Dc(CVADP, x20);
12135 END();
12136
12137 if (CAN_RUN()) {
12138 RUN();
12139 ASSERT_EQUAL_64(msg_addr, x20);
12140 }
12141 }
12142
12143
12144 // We currently disable tests for CRC32 instructions when running natively.
12145 // Support for this family of instructions is optional, and so native platforms
12146 // may simply fail to execute the test.
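// For reference, the expected values below can be reproduced with a
// bit-by-bit model using the reflected polynomials of these instructions
// (0xedb88320 for CRC32, 0x82f63b78 for CRC32C). A minimal sketch, kept as a
// comment so that it does not affect the build:
//
//   uint32_t CrcByte(uint32_t acc, uint8_t data, uint32_t poly) {
//     uint32_t crc = acc ^ data;
//     for (int i = 0; i < 8; i++) {
//       crc = ((crc & 1) != 0) ? ((crc >> 1) ^ poly) : (crc >> 1);
//     }
//     return crc;
//   }
//
// For example, CrcByte(0, 128, 0xedb88320) is 0xedb88320, matching the
// `Crc32b(w13, w0, w1)` case checked against x13 below.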
12147 TEST(crc32b) {
12148 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12149
12150 START();
12151
12152 __ Mov(w0, 0);
12153 __ Mov(w1, 0);
12154 __ Crc32b(w10, w0, w1);
12155
12156 __ Mov(w0, 0x1);
12157 __ Mov(w1, 0x138);
12158 __ Crc32b(w11, w0, w1);
12159
12160 __ Mov(w0, 0x1);
12161 __ Mov(w1, 0x38);
12162 __ Crc32b(w12, w0, w1);
12163
12164 __ Mov(w0, 0);
12165 __ Mov(w1, 128);
12166 __ Crc32b(w13, w0, w1);
12167
12168 __ Mov(w0, UINT32_MAX);
12169 __ Mov(w1, 255);
12170 __ Crc32b(w14, w0, w1);
12171
12172 __ Mov(w0, 0x00010001);
12173 __ Mov(w1, 0x10001000);
12174 __ Crc32b(w15, w0, w1);
12175
12176 END();
12177
12178 if (CAN_RUN()) {
12179 RUN();
12180
12181 ASSERT_EQUAL_64(0x0, x10);
12182 ASSERT_EQUAL_64(0x5f058808, x11);
12183 ASSERT_EQUAL_64(0x5f058808, x12);
12184 ASSERT_EQUAL_64(0xedb88320, x13);
12185 ASSERT_EQUAL_64(0x00ffffff, x14);
12186 ASSERT_EQUAL_64(0x77073196, x15);
12187 }
12188 }
12189
12190
12191 TEST(crc32h) {
12192 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12193
12194 START();
12195
12196 __ Mov(w0, 0);
12197 __ Mov(w1, 0);
12198 __ Crc32h(w10, w0, w1);
12199
12200 __ Mov(w0, 0x1);
12201 __ Mov(w1, 0x10038);
12202 __ Crc32h(w11, w0, w1);
12203
12204 __ Mov(w0, 0x1);
12205 __ Mov(w1, 0x38);
12206 __ Crc32h(w12, w0, w1);
12207
12208 __ Mov(w0, 0);
12209 __ Mov(w1, 128);
12210 __ Crc32h(w13, w0, w1);
12211
12212 __ Mov(w0, UINT32_MAX);
12213 __ Mov(w1, 255);
12214 __ Crc32h(w14, w0, w1);
12215
12216 __ Mov(w0, 0x00010001);
12217 __ Mov(w1, 0x10001000);
12218 __ Crc32h(w15, w0, w1);
12219
12220 END();
12221
12222 if (CAN_RUN()) {
12223 RUN();
12224
12225 ASSERT_EQUAL_64(0x0, x10);
12226 ASSERT_EQUAL_64(0x0e848dba, x11);
12227 ASSERT_EQUAL_64(0x0e848dba, x12);
12228 ASSERT_EQUAL_64(0x3b83984b, x13);
12229 ASSERT_EQUAL_64(0x2d021072, x14);
12230 ASSERT_EQUAL_64(0x04ac2124, x15);
12231 }
12232 }
12233
12234
12235 TEST(crc32w) {
12236 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12237
12238 START();
12239
12240 __ Mov(w0, 0);
12241 __ Mov(w1, 0);
12242 __ Crc32w(w10, w0, w1);
12243
12244 __ Mov(w0, 0x1);
12245 __ Mov(w1, 0x80000031);
12246 __ Crc32w(w11, w0, w1);
12247
12248 __ Mov(w0, 0);
12249 __ Mov(w1, 128);
12250 __ Crc32w(w13, w0, w1);
12251
12252 __ Mov(w0, UINT32_MAX);
12253 __ Mov(w1, 255);
12254 __ Crc32w(w14, w0, w1);
12255
12256 __ Mov(w0, 0x00010001);
12257 __ Mov(w1, 0x10001000);
12258 __ Crc32w(w15, w0, w1);
12259
12260 END();
12261
12262 if (CAN_RUN()) {
12263 RUN();
12264
12265 ASSERT_EQUAL_64(0x0, x10);
12266 ASSERT_EQUAL_64(0x1d937b81, x11);
12267 ASSERT_EQUAL_64(0xed59b63b, x13);
12268 ASSERT_EQUAL_64(0x00be2612, x14);
12269 ASSERT_EQUAL_64(0xa036e530, x15);
12270 }
12271 }
12272
12273
12274 TEST(crc32x) {
12275 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12276
12277 START();
12278
12279 __ Mov(w0, 0);
12280 __ Mov(x1, 0);
12281 __ Crc32x(w10, w0, x1);
12282
12283 __ Mov(w0, 0x1);
12284 __ Mov(x1, UINT64_C(0x0000000800000031));
12285 __ Crc32x(w11, w0, x1);
12286
12287 __ Mov(w0, 0);
12288 __ Mov(x1, 128);
12289 __ Crc32x(w13, w0, x1);
12290
12291 __ Mov(w0, UINT32_MAX);
12292 __ Mov(x1, 255);
12293 __ Crc32x(w14, w0, x1);
12294
12295 __ Mov(w0, 0x00010001);
12296 __ Mov(x1, UINT64_C(0x1000100000000000));
12297 __ Crc32x(w15, w0, x1);
12298
12299 END();
12300
12301 if (CAN_RUN()) {
12302 RUN();
12303
12304 ASSERT_EQUAL_64(0x0, x10);
12305 ASSERT_EQUAL_64(0x40797b92, x11);
12306 ASSERT_EQUAL_64(0x533b85da, x13);
12307 ASSERT_EQUAL_64(0xbc962670, x14);
12308 ASSERT_EQUAL_64(0x0667602f, x15);
12309 }
12310 }
12311
12312
12313 TEST(crc32cb) {
12314 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12315
12316 START();
12317
12318 __ Mov(w0, 0);
12319 __ Mov(w1, 0);
12320 __ Crc32cb(w10, w0, w1);
12321
12322 __ Mov(w0, 0x1);
12323 __ Mov(w1, 0x138);
12324 __ Crc32cb(w11, w0, w1);
12325
12326 __ Mov(w0, 0x1);
12327 __ Mov(w1, 0x38);
12328 __ Crc32cb(w12, w0, w1);
12329
12330 __ Mov(w0, 0);
12331 __ Mov(w1, 128);
12332 __ Crc32cb(w13, w0, w1);
12333
12334 __ Mov(w0, UINT32_MAX);
12335 __ Mov(w1, 255);
12336 __ Crc32cb(w14, w0, w1);
12337
12338 __ Mov(w0, 0x00010001);
12339 __ Mov(w1, 0x10001000);
12340 __ Crc32cb(w15, w0, w1);
12341
12342 END();
12343
12344 if (CAN_RUN()) {
12345 RUN();
12346
12347 ASSERT_EQUAL_64(0x0, x10);
12348 ASSERT_EQUAL_64(0x4851927d, x11);
12349 ASSERT_EQUAL_64(0x4851927d, x12);
12350 ASSERT_EQUAL_64(0x82f63b78, x13);
12351 ASSERT_EQUAL_64(0x00ffffff, x14);
12352 ASSERT_EQUAL_64(0xf26b8203, x15);
12353 }
12354 }
12355
12356
12357 TEST(crc32ch) {
12358 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12359
12360 START();
12361
12362 __ Mov(w0, 0);
12363 __ Mov(w1, 0);
12364 __ Crc32ch(w10, w0, w1);
12365
12366 __ Mov(w0, 0x1);
12367 __ Mov(w1, 0x10038);
12368 __ Crc32ch(w11, w0, w1);
12369
12370 __ Mov(w0, 0x1);
12371 __ Mov(w1, 0x38);
12372 __ Crc32ch(w12, w0, w1);
12373
12374 __ Mov(w0, 0);
12375 __ Mov(w1, 128);
12376 __ Crc32ch(w13, w0, w1);
12377
12378 __ Mov(w0, UINT32_MAX);
12379 __ Mov(w1, 255);
12380 __ Crc32ch(w14, w0, w1);
12381
12382 __ Mov(w0, 0x00010001);
12383 __ Mov(w1, 0x10001000);
12384 __ Crc32ch(w15, w0, w1);
12385
12386 END();
12387
12388 if (CAN_RUN()) {
12389 RUN();
12390
12391 ASSERT_EQUAL_64(0x0, x10);
12392 ASSERT_EQUAL_64(0xcef8494c, x11);
12393 ASSERT_EQUAL_64(0xcef8494c, x12);
12394 ASSERT_EQUAL_64(0xfbc3faf9, x13);
12395 ASSERT_EQUAL_64(0xad7dacae, x14);
12396 ASSERT_EQUAL_64(0x03fc5f19, x15);
12397 }
12398 }
12399
12400
12401 TEST(crc32cw) {
12402 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12403
12404 START();
12405
12406 __ Mov(w0, 0);
12407 __ Mov(w1, 0);
12408 __ Crc32cw(w10, w0, w1);
12409
12410 __ Mov(w0, 0x1);
12411 __ Mov(w1, 0x80000031);
12412 __ Crc32cw(w11, w0, w1);
12413
12414 __ Mov(w0, 0);
12415 __ Mov(w1, 128);
12416 __ Crc32cw(w13, w0, w1);
12417
12418 __ Mov(w0, UINT32_MAX);
12419 __ Mov(w1, 255);
12420 __ Crc32cw(w14, w0, w1);
12421
12422 __ Mov(w0, 0x00010001);
12423 __ Mov(w1, 0x10001000);
12424 __ Crc32cw(w15, w0, w1);
12425
12426 END();
12427
12428 if (CAN_RUN()) {
12429 RUN();
12430
12431 ASSERT_EQUAL_64(0x0, x10);
12432 ASSERT_EQUAL_64(0xbcb79ece, x11);
12433 ASSERT_EQUAL_64(0x52a0c93f, x13);
12434 ASSERT_EQUAL_64(0x9f9b5c7a, x14);
12435 ASSERT_EQUAL_64(0xae1b882a, x15);
12436 }
12437 }
12438
12439
12440 TEST(crc32cx) {
12441 SETUP_WITH_FEATURES(CPUFeatures::kCRC32);
12442
12443 START();
12444
12445 __ Mov(w0, 0);
12446 __ Mov(x1, 0);
12447 __ Crc32cx(w10, w0, x1);
12448
12449 __ Mov(w0, 0x1);
12450 __ Mov(x1, UINT64_C(0x0000000800000031));
12451 __ Crc32cx(w11, w0, x1);
12452
12453 __ Mov(w0, 0);
12454 __ Mov(x1, 128);
12455 __ Crc32cx(w13, w0, x1);
12456
12457 __ Mov(w0, UINT32_MAX);
12458 __ Mov(x1, 255);
12459 __ Crc32cx(w14, w0, x1);
12460
12461 __ Mov(w0, 0x00010001);
12462 __ Mov(x1, UINT64_C(0x1000100000000000));
12463 __ Crc32cx(w15, w0, x1);
12464
12465 END();
12466
12467 if (CAN_RUN()) {
12468 RUN();
12469
12470 ASSERT_EQUAL_64(0x0, x10);
12471 ASSERT_EQUAL_64(0x7f320fcb, x11);
12472 ASSERT_EQUAL_64(0x34019664, x13);
12473 ASSERT_EQUAL_64(0x6cc27dd0, x14);
12474 ASSERT_EQUAL_64(0xc6f0acdb, x15);
12475 }
12476 }
12477
12478 TEST(regress_cmp_shift_imm) {
12479 SETUP();
12480
12481 START();
12482
12483 __ Mov(x0, 0x3d720c8d);
12484 __ Cmp(x0, Operand(0x3d720c8d));
12485
12486 END();
12487 if (CAN_RUN()) {
12488 RUN();
12489
12490 ASSERT_EQUAL_NZCV(ZCFlag);
12491 }
12492 }
12493
12494
12495 TEST(compute_address) {
12496 SETUP();
12497
12498 START();
12499 int64_t base_address = INT64_C(0x123000000abc);
12500 int64_t reg_offset = INT64_C(0x1087654321);
12501 Register base = x0;
12502 Register offset = x1;
12503
12504 __ Mov(base, base_address);
12505 __ Mov(offset, reg_offset);
12506
12507
12508 __ ComputeAddress(x2, MemOperand(base, 0));
12509 __ ComputeAddress(x3, MemOperand(base, 8));
12510 __ ComputeAddress(x4, MemOperand(base, -100));
12511
12512 __ ComputeAddress(x5, MemOperand(base, offset));
12513 __ ComputeAddress(x6, MemOperand(base, offset, LSL, 2));
12514 __ ComputeAddress(x7, MemOperand(base, offset, LSL, 4));
12515 __ ComputeAddress(x8, MemOperand(base, offset, LSL, 8));
12516
12517 __ ComputeAddress(x9, MemOperand(base, offset, SXTW));
12518 __ ComputeAddress(x10, MemOperand(base, offset, UXTW, 1));
12519 __ ComputeAddress(x11, MemOperand(base, offset, SXTW, 2));
12520 __ ComputeAddress(x12, MemOperand(base, offset, UXTW, 3));
12521
12522 END();
12523
12524 if (CAN_RUN()) {
12525 RUN();
12526
12527 ASSERT_EQUAL_64(base_address, base);
12528
12529 ASSERT_EQUAL_64(INT64_C(0x123000000abc), x2);
12530 ASSERT_EQUAL_64(INT64_C(0x123000000ac4), x3);
12531 ASSERT_EQUAL_64(INT64_C(0x123000000a58), x4);
12532
12533 ASSERT_EQUAL_64(INT64_C(0x124087654ddd), x5);
12534 ASSERT_EQUAL_64(INT64_C(0x12721d951740), x6);
12535 ASSERT_EQUAL_64(INT64_C(0x133876543ccc), x7);
12536 ASSERT_EQUAL_64(INT64_C(0x22b765432bbc), x8);
12537
12538 ASSERT_EQUAL_64(INT64_C(0x122f87654ddd), x9);
12539 ASSERT_EQUAL_64(INT64_C(0x12310eca90fe), x10);
12540 ASSERT_EQUAL_64(INT64_C(0x122e1d951740), x11);
12541 ASSERT_EQUAL_64(INT64_C(0x12343b2a23c4), x12);
12542 }
12543 }
12544
12545
12546 TEST(far_branch_backward) {
12547 // Test that the MacroAssembler correctly resolves backward branches to labels
12548 // that are outside the immediate range of branch instructions.
12549 // Take into account that backward branches can reach one instruction further
12550 // than forward branches.
12551 const int overflow_size =
12552 kInstructionSize +
12553 std::max(Instruction::GetImmBranchForwardRange(TestBranchType),
12554 std::max(Instruction::GetImmBranchForwardRange(
12555 CompareBranchType),
12556 Instruction::GetImmBranchForwardRange(CondBranchType)));
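// As a rough worked example of this arithmetic: tbz/tbnz encode a 14-bit
// scaled immediate (about +/-32 KB), while cbz/cbnz and b.cond encode a 19-bit
// scaled immediate (about +/-1 MB), so overflow_size ends up just over 1 MB:
// the largest forward range plus one instruction for the extra backward reach.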
12557
12558 SETUP();
12559 START();
12560
12561 Label done, fail;
12562 Label test_tbz, test_cbz, test_bcond;
12563 Label success_tbz, success_cbz, success_bcond;
12564
12565 __ Mov(x0, 0);
12566 __ Mov(x1, 1);
12567 __ Mov(x10, 0);
12568
12569 __ B(&test_tbz);
12570 __ Bind(&success_tbz);
12571 __ Orr(x0, x0, 1 << 0);
12572 __ B(&test_cbz);
12573 __ Bind(&success_cbz);
12574 __ Orr(x0, x0, 1 << 1);
12575 __ B(&test_bcond);
12576 __ Bind(&success_bcond);
12577 __ Orr(x0, x0, 1 << 2);
12578
12579 __ B(&done);
12580
12581 // Generate enough code to overflow the immediate range of the three types of
12582 // branches below.
12583 for (unsigned i = 0; i < overflow_size / kInstructionSize; ++i) {
12584 if (i % 100 == 0) {
12585 // If we do land in this code, we do not want to execute so many nops
12586 // before reaching the end of the test (especially if tracing is activated).
12587 __ B(&fail);
12588 } else {
12589 __ Nop();
12590 }
12591 }
12592 __ B(&fail);
12593
12594 __ Bind(&test_tbz);
12595 __ Tbz(x10, 7, &success_tbz);
12596 __ Bind(&test_cbz);
12597 __ Cbz(x10, &success_cbz);
12598 __ Bind(&test_bcond);
12599 __ Cmp(x10, 0);
12600 __ B(eq, &success_bcond);
12601
12602 // For each out-of-range branch instruction, at least two instructions should
12603 // have been generated.
12604 VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&test_tbz) >=
12605 7 * kInstructionSize);
12606
12607 __ Bind(&fail);
12608 __ Mov(x1, 0);
12609 __ Bind(&done);
12610
12611 END();
12612 if (CAN_RUN()) {
12613 RUN();
12614
12615 ASSERT_EQUAL_64(0x7, x0);
12616 ASSERT_EQUAL_64(0x1, x1);
12617 }
12618 }
12619
12620
12621 TEST(single_veneer) {
12622 SETUP();
12623 START();
12624
12625 const int max_range = Instruction::GetImmBranchForwardRange(TestBranchType);
12626
12627 Label success, fail, done;
12628
12629 __ Mov(x0, 0);
12630 __ Mov(x1, 1);
12631 __ Mov(x10, 0);
12632
12633 __ Tbz(x10, 7, &success);
12634
12635 // Generate enough code to overflow the immediate range of the `tbz`.
12636 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
12637 if (i % 100 == 0) {
12638 // If we do land in this code, we do not want to execute so many nops
12639 // before reaching the end of the test (especially if tracing is activated).
12640 __ B(&fail);
12641 } else {
12642 __ Nop();
12643 }
12644 }
12645 __ B(&fail);
12646
12647 __ Bind(&success);
12648 __ Mov(x0, 1);
12649
12650 __ B(&done);
12651 __ Bind(&fail);
12652 __ Mov(x1, 0);
12653 __ Bind(&done);
12654
12655 END();
12656 if (CAN_RUN()) {
12657 RUN();
12658
12659 ASSERT_EQUAL_64(1, x0);
12660 ASSERT_EQUAL_64(1, x1);
12661 }
12662 }
12663
12664
12665 TEST(simple_veneers) {
12666 // Test that the MacroAssembler correctly emits veneers for forward branches
12667 // to labels that are outside the immediate range of branch instructions.
12668 const int max_range =
12669 std::max(Instruction::GetImmBranchForwardRange(TestBranchType),
12670 std::max(Instruction::GetImmBranchForwardRange(
12671 CompareBranchType),
12672 Instruction::GetImmBranchForwardRange(CondBranchType)));
12673
12674 SETUP();
12675 START();
12676
12677 Label done, fail;
12678 Label test_tbz, test_cbz, test_bcond;
12679 Label success_tbz, success_cbz, success_bcond;
12680
12681 __ Mov(x0, 0);
12682 __ Mov(x1, 1);
12683 __ Mov(x10, 0);
12684
12685 __ Bind(&test_tbz);
12686 __ Tbz(x10, 7, &success_tbz);
12687 __ Bind(&test_cbz);
12688 __ Cbz(x10, &success_cbz);
12689 __ Bind(&test_bcond);
12690 __ Cmp(x10, 0);
12691 __ B(eq, &success_bcond);
12692
12693 // Generate enough code to overflow the immediate range of the three types of
12694 // branches above.
12695 for (unsigned i = 0; i < max_range / kInstructionSize + 1; ++i) {
12696 if (i % 100 == 0) {
12697 // If we do land in this code, we do not want to execute so many nops
12698 // before reaching the end of the test (especially if tracing is activated).
12699 __ B(&fail);
12700 } else {
12701 __ Nop();
12702 }
12703 }
12704 __ B(&fail);
12705
12706 __ Bind(&success_tbz);
12707 __ Orr(x0, x0, 1 << 0);
12708 __ B(&test_cbz);
12709 __ Bind(&success_cbz);
12710 __ Orr(x0, x0, 1 << 1);
12711 __ B(&test_bcond);
12712 __ Bind(&success_bcond);
12713 __ Orr(x0, x0, 1 << 2);
12714
12715 __ B(&done);
12716 __ Bind(&fail);
12717 __ Mov(x1, 0);
12718 __ Bind(&done);
12719
12720 END();
12721 if (CAN_RUN()) {
12722 RUN();
12723
12724 ASSERT_EQUAL_64(0x7, x0);
12725 ASSERT_EQUAL_64(0x1, x1);
12726 }
12727 }
12728
12729
12730 TEST(veneers_stress) {
12731 SETUP();
12732 START();
12733
12734 // This is a code generation test stressing the emission of veneers. The code
12735 // generated is not executed.
12736
12737 Label target;
12738 const unsigned max_range =
12739 Instruction::GetImmBranchForwardRange(CondBranchType);
12740 const unsigned iterations =
12741 (max_range + max_range / 4) / (4 * kInstructionSize);
12742 for (unsigned i = 0; i < iterations; i++) {
12743 __ B(&target);
12744 __ B(eq, &target);
12745 __ Cbz(x0, &target);
12746 __ Tbz(x0, 0, &target);
12747 }
12748 __ Bind(&target);
12749
12750 END();
12751 }
12752
12753
12754 TEST(veneers_two_out_of_range) {
12755 SETUP();
12756 START();
12757
12758 // This is a code generation test. The code generated is not executed.
12759 // Ensure that the MacroAssembler takes unresolved branches into account when
12760 // choosing when a veneer pool should be emitted. We generate two branches that
12761 // go out of range at the same offset. When the MacroAssembler decides to emit
12762 // the veneer pool, the emission of the first veneer should not cause the other
12763 // branch to go out of range.
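// Concretely: the cbz below can reach addresses up to `max_target`, and the
// tbz is emitted `range_tbz` bytes before that same point, so both branches
// hit their range limit at the same cursor offset and the single extra nop at
// the end pushes them out of range together.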
12764
12765 int range_cbz = Instruction::GetImmBranchForwardRange(CompareBranchType);
12766 int range_tbz = Instruction::GetImmBranchForwardRange(TestBranchType);
12767 int max_target = static_cast<int>(masm.GetCursorOffset()) + range_cbz;
12768
12769 Label done;
12770
12771 // We use different labels to prevent the MacroAssembler from sharing veneers.
12772 Label target_cbz, target_tbz;
12773
12774 __ Cbz(x0, &target_cbz);
12775 while (masm.GetCursorOffset() < max_target - range_tbz) {
12776 __ Nop();
12777 }
12778 __ Tbz(x0, 0, &target_tbz);
12779 while (masm.GetCursorOffset() < max_target) {
12780 __ Nop();
12781 }
12782
12783 // This additional nop makes the branches go out of range.
12784 __ Nop();
12785
12786 __ Bind(&target_cbz);
12787 __ Bind(&target_tbz);
12788
12789 END();
12790 }
12791
12792
12793 TEST(veneers_hanging) {
12794 SETUP();
12795 START();
12796
12797 // This is a code generation test. The code generated is not executed.
12798 // Ensure that the MacroAssembler takes unresolved branches into account when
12799 // choosing when a veneer pool should be emitted. This is similar to the
12800 // 'veneers_two_out_of_range' test. We try to trigger the following situation:
12801 // b.eq label
12802 // b.eq label
12803 // ...
12804 // nop
12805 // ...
12806 // cbz x0, label
12807 // cbz x0, label
12808 // ...
12809 // tbz x0, 0 label
12810 // nop
12811 // ...
12812 // nop <- From here the `b.eq` and `cbz` instructions run out of range,
12813 // so a veneer pool is required.
12814 // veneer
12815 // veneer
12816 // veneer <- The `tbz` runs out of range somewhere in the middle of the
12817 // veneer veneer pool.
12818 // veneer
12819
12820 const int range_bcond = Instruction::GetImmBranchForwardRange(CondBranchType);
12821 const int range_cbz =
12822 Instruction::GetImmBranchForwardRange(CompareBranchType);
12823 const int range_tbz = Instruction::GetImmBranchForwardRange(TestBranchType);
12824 const int max_target = static_cast<int>(masm.GetCursorOffset()) + range_bcond;
12825
12826 Label done;
12827 const int n_bcond = 100;
12828 const int n_cbz = 100;
12829 const int n_tbz = 1;
12830 const int kNTotalBranches = n_bcond + n_cbz + n_tbz;
12831
12832 // We use different labels to prevent the MacroAssembler from sharing veneers.
12833 Label labels[kNTotalBranches];
12834 for (int i = 0; i < kNTotalBranches; i++) {
12835 new (&labels[i]) Label();
12836 }
12837
12838 for (int i = 0; i < n_bcond; i++) {
12839 __ B(eq, &labels[i]);
12840 }
12841
12842 while (masm.GetCursorOffset() < max_target - range_cbz) {
12843 __ Nop();
12844 }
12845
12846 for (int i = 0; i < n_cbz; i++) {
12847 __ Cbz(x0, &labels[n_bcond + i]);
12848 }
12849
12850 // Ensure the 'tbz' will go out of range after some of the previously
12851 // generated branches.
12852 int margin = (n_bcond / 2) * kInstructionSize;
12853 while (masm.GetCursorOffset() < max_target - range_tbz + margin) {
12854 __ Nop();
12855 }
12856
12857 __ Tbz(x0, 0, &labels[n_bcond + n_cbz]);
12858
12859 while (masm.GetCursorOffset() < max_target) {
12860 __ Nop();
12861 }
12862
12863 // This additional nop makes the 'b.eq' and 'cbz' instructions go out of range
12864 // and forces the emission of a veneer pool. The 'tbz' is not yet out of
12865 // range, but will go out of range while veneers are emitted for the other
12866 // branches.
12867 // The MacroAssembler should ensure that veneers are correctly emitted for all
12868 // the branches, including the 'tbz'. Checks will fail if the target of a
12869 // branch is out of range.
12870 __ Nop();
12871
12872 for (int i = 0; i < kNTotalBranches; i++) {
12873 __ Bind(&labels[i]);
12874 }
12875
12876 END();
12877 }
12878
12879
12880 TEST(collision_literal_veneer_pools) {
12881 SETUP_WITH_FEATURES(CPUFeatures::kFP);
12882 START();
12883
12884 // This is a code generation test. The code generated is not executed.
12885
12886 // Make sure the literal pool is empty.
12887 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
12888 ASSERT_LITERAL_POOL_SIZE(0);
12889
12890 // We chose the offsets below to (try to) trigger the following situation:
12891 // buffer offset
12892 // 0: tbz x0, 0, target_tbz ----------------------------------.
12893 // 4: nop |
12894 // ... |
12895 // nop |
12896 // literal gen: ldr s0, [pc + ...] ; load from `pool start + 0` |
12897 // ldr s0, [pc + ...] ; load from `pool start + 4` |
12898 // ... |
12899 // ldr s0, [pc + ...] |
12900 // pool start: floating-point literal (0.1) |
12901 // floating-point literal (1.1) |
12902 // ... |
12903 // floating-point literal (<n>.1) <-----tbz-max-range--'
12904 // floating-point literal (<n+1>.1)
12905 // ...
12906
12907 const int range_tbz = Instruction::GetImmBranchForwardRange(TestBranchType);
12908 const int max_target = static_cast<int>(masm.GetCursorOffset()) + range_tbz;
12909
12910 const size_t target_literal_pool_size = 100 * kInstructionSize;
12911 const int offset_start_literal_gen =
12912 target_literal_pool_size + target_literal_pool_size / 2;
12913
12914
12915 Label target_tbz;
12916
12917 __ Tbz(x0, 0, &target_tbz);
12918 VIXL_CHECK(masm.GetNumberOfPotentialVeneers() == 1);
12919 while (masm.GetCursorOffset() < max_target - offset_start_literal_gen) {
12920 __ Nop();
12921 }
12922 VIXL_CHECK(masm.GetNumberOfPotentialVeneers() == 1);
12923
12924 for (int i = 0; i < 100; i++) {
12925 // Use a different value to force one literal pool entry per iteration.
12926 __ Ldr(s0, i + 0.1);
12927 }
12928 VIXL_CHECK(masm.GetLiteralPoolSize() >= target_literal_pool_size);
12929
12930 // Force emission of a literal pool.
12931 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
12932 ASSERT_LITERAL_POOL_SIZE(0);
12933
12934 // The branch should not have gone out of range during the emission of the
12935 // literal pool.
12936 __ Bind(&target_tbz);
12937
12938 VIXL_CHECK(masm.GetNumberOfPotentialVeneers() == 0);
12939
12940 END();
12941 }
12942
12943
12944 TEST(ldr_literal_explicit) {
12945 SETUP();
12946
12947 START();
12948 Literal<int64_t> automatically_placed_literal(1, masm.GetLiteralPool());
12949 Literal<int64_t> manually_placed_literal(2);
12950 {
12951 ExactAssemblyScope scope(&masm, kInstructionSize + sizeof(int64_t));
12952 Label over_literal;
12953 __ b(&over_literal);
12954 __ place(&manually_placed_literal);
12955 __ bind(&over_literal);
12956 }
12957 __ Ldr(x1, &manually_placed_literal);
12958 __ Ldr(x2, &automatically_placed_literal);
12959 __ Add(x0, x1, x2);
12960 END();
12961
12962 if (CAN_RUN()) {
12963 RUN();
12964
12965 ASSERT_EQUAL_64(3, x0);
12966 }
12967 }
12968
12969
12970 TEST(ldr_literal_automatically_placed) {
12971 SETUP_WITH_FEATURES(CPUFeatures::kFP);
12972
12973 START();
12974
12975 // We start with an empty literal pool.
12976 ASSERT_LITERAL_POOL_SIZE(0);
12977
12978 // Create a literal that should be placed by the literal pool.
12979 Literal<int64_t> explicit_literal(2, masm.GetLiteralPool());
12980 // It should not appear in the literal pool until its first use.
12981 ASSERT_LITERAL_POOL_SIZE(0);
12982
12983 // Check that using standard literals does not break the use of explicitly
12984 // created literals.
12985 __ Ldr(d1, 1.1);
12986 ASSERT_LITERAL_POOL_SIZE(8);
12987 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
12988 ASSERT_LITERAL_POOL_SIZE(0);
12989
12990 __ Ldr(x2, &explicit_literal);
12991 ASSERT_LITERAL_POOL_SIZE(8);
12992 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
12993 ASSERT_LITERAL_POOL_SIZE(0);
12994
12995 __ Ldr(d3, 3.3);
12996 ASSERT_LITERAL_POOL_SIZE(8);
12997 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
12998 ASSERT_LITERAL_POOL_SIZE(0);
12999
13000 // Re-use our explicitly created literal. It has already been placed, so it
13001 // should not impact the literal pool.
13002 __ Ldr(x4, &explicit_literal);
13003 ASSERT_LITERAL_POOL_SIZE(0);
13004
13005 END();
13006
13007 if (CAN_RUN()) {
13008 RUN();
13009
13010 ASSERT_EQUAL_FP64(1.1, d1);
13011 ASSERT_EQUAL_64(2, x2);
13012 ASSERT_EQUAL_FP64(3.3, d3);
13013 ASSERT_EQUAL_64(2, x4);
13014 }
13015 }
13016
13017
13018 TEST(literal_update_overwrite) {
13019 SETUP();
13020
13021 START();
13022
13023 ASSERT_LITERAL_POOL_SIZE(0);
13024 LiteralPool* literal_pool = masm.GetLiteralPool();
13025
13026 Literal<int32_t> lit_32_update_before_pool(0xbad, literal_pool);
13027 Literal<int32_t> lit_32_update_after_pool(0xbad, literal_pool);
13028 Literal<int64_t> lit_64_update_before_pool(0xbad, literal_pool);
13029 Literal<int64_t> lit_64_update_after_pool(0xbad, literal_pool);
13030
13031 ASSERT_LITERAL_POOL_SIZE(0);
13032
13033 lit_32_update_before_pool.UpdateValue(32);
13034 lit_64_update_before_pool.UpdateValue(64);
13035
13036 __ Ldr(w1, &lit_32_update_before_pool);
13037 __ Ldr(x2, &lit_64_update_before_pool);
13038 __ Ldr(w3, &lit_32_update_after_pool);
13039 __ Ldr(x4, &lit_64_update_after_pool);
13040
13041 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
13042
13043 VIXL_ASSERT(lit_32_update_after_pool.IsPlaced());
13044 VIXL_ASSERT(lit_64_update_after_pool.IsPlaced());
13045 lit_32_update_after_pool.UpdateValue(128, &masm);
13046 lit_64_update_after_pool.UpdateValue(256, &masm);
13047
13048 END();
13049
13050 if (CAN_RUN()) {
13051 RUN();
13052
13053 ASSERT_EQUAL_64(32, x1);
13054 ASSERT_EQUAL_64(64, x2);
13055 ASSERT_EQUAL_64(128, x3);
13056 ASSERT_EQUAL_64(256, x4);
13057 }
13058 }
13059
13060
13061 TEST(literal_deletion_policies) {
13062 SETUP();
13063
13064 START();
13065
13066 // We cannot check exactly when the deletion of the literals occurs, but we
13067 // check that usage of the deletion policies is not broken.
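// Three ownership models are exercised below: a stack-allocated literal that
// the test manages itself, a heap-allocated literal deleted by the pool as
// soon as it has been placed (kDeletedOnPlacementByPool), and a heap-allocated
// literal deleted only when the pool itself is destroyed
// (kDeletedOnPoolDestruction).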
13068
13069 ASSERT_LITERAL_POOL_SIZE(0);
13070 LiteralPool* literal_pool = masm.GetLiteralPool();
13071
13072 Literal<int32_t> lit_manual(0xbad, literal_pool);
13073 Literal<int32_t>* lit_deleted_on_placement =
13074 new Literal<int32_t>(0xbad,
13075 literal_pool,
13076 RawLiteral::kDeletedOnPlacementByPool);
13077 Literal<int32_t>* lit_deleted_on_pool_destruction =
13078 new Literal<int32_t>(0xbad,
13079 literal_pool,
13080 RawLiteral::kDeletedOnPoolDestruction);
13081
13082 ASSERT_LITERAL_POOL_SIZE(0);
13083
13084 lit_manual.UpdateValue(32);
13085 lit_deleted_on_placement->UpdateValue(64);
13086
13087 __ Ldr(w1, &lit_manual);
13088 __ Ldr(w2, lit_deleted_on_placement);
13089 __ Ldr(w3, lit_deleted_on_pool_destruction);
13090
13091 masm.EmitLiteralPool(LiteralPool::kBranchRequired);
13092
13093 VIXL_ASSERT(lit_manual.IsPlaced());
13094 VIXL_ASSERT(lit_deleted_on_pool_destruction->IsPlaced());
13095 lit_deleted_on_pool_destruction->UpdateValue(128, &masm);
13096
13097 END();
13098
13099 if (CAN_RUN()) {
13100 RUN();
13101
13102 ASSERT_EQUAL_64(32, x1);
13103 ASSERT_EQUAL_64(64, x2);
13104 ASSERT_EQUAL_64(128, x3);
13105 }
13106 }
13107
13108
13109 TEST(generic_operand) {
13110 SETUP_WITH_FEATURES(CPUFeatures::kFP);
13111
13112 int32_t data_32_array[5] = {0xbadbeef,
13113 0x11111111,
13114 0xbadbeef,
13115 0x33333333,
13116 0xbadbeef};
13117 int64_t data_64_array[5] = {INT64_C(0xbadbadbadbeef),
13118 INT64_C(0x1111111111111111),
13119 INT64_C(0xbadbadbadbeef),
13120 INT64_C(0x3333333333333333),
13121 INT64_C(0xbadbadbadbeef)};
13122 size_t size_32 = sizeof(data_32_array[0]);
13123 size_t size_64 = sizeof(data_64_array[0]);
13124
13125 START();
13126
13127 intptr_t data_32_address = reinterpret_cast<intptr_t>(&data_32_array[0]);
13128 intptr_t data_64_address = reinterpret_cast<intptr_t>(&data_64_array[0]);
13129 Register data_32 = x27;
13130 Register data_64 = x28;
13131 __ Mov(data_32, data_32_address);
13132 __ Mov(data_64, data_64_address);
13133
13134 __ Move(GenericOperand(w0),
13135 GenericOperand(MemOperand(data_32, 1 * size_32), size_32));
13136 __ Move(GenericOperand(s0),
13137 GenericOperand(MemOperand(data_32, 3 * size_32), size_32));
13138 __ Move(GenericOperand(x10),
13139 GenericOperand(MemOperand(data_64, 1 * size_64), size_64));
13140 __ Move(GenericOperand(d10),
13141 GenericOperand(MemOperand(data_64, 3 * size_64), size_64));
13142
13143 __ Move(GenericOperand(w1), GenericOperand(w0));
13144 __ Move(GenericOperand(s1), GenericOperand(s0));
13145 __ Move(GenericOperand(x11), GenericOperand(x10));
13146 __ Move(GenericOperand(d11), GenericOperand(d10));
13147
13148 __ Move(GenericOperand(MemOperand(data_32, 0 * size_32), size_32),
13149 GenericOperand(w1));
13150 __ Move(GenericOperand(MemOperand(data_32, 2 * size_32), size_32),
13151 GenericOperand(s1));
13152 __ Move(GenericOperand(MemOperand(data_64, 0 * size_64), size_64),
13153 GenericOperand(x11));
13154 __ Move(GenericOperand(MemOperand(data_64, 2 * size_64), size_64),
13155 GenericOperand(d11));
13156
13157 __ Move(GenericOperand(MemOperand(data_32, 4 * size_32), size_32),
13158 GenericOperand(MemOperand(data_32, 0 * size_32), size_32));
13159 __ Move(GenericOperand(MemOperand(data_64, 4 * size_64), size_64),
13160 GenericOperand(MemOperand(data_64, 0 * size_64), size_64));
13161 END();
13162
13163 if (CAN_RUN()) {
13164 RUN();
13165
13166 ASSERT_EQUAL_64(data_32_address, data_32);
13167 ASSERT_EQUAL_64(data_64_address, data_64);
13168
13169 ASSERT_EQUAL_32(0x11111111, w0);
13170 ASSERT_EQUAL_32(0x33333333, core.sreg_bits(0));
13171 ASSERT_EQUAL_64(INT64_C(0x1111111111111111), x10);
13172 ASSERT_EQUAL_64(INT64_C(0x3333333333333333), core.dreg_bits(10));
13173
13174 ASSERT_EQUAL_32(0x11111111, w1);
13175 ASSERT_EQUAL_32(0x33333333, core.sreg_bits(1));
13176 ASSERT_EQUAL_64(INT64_C(0x1111111111111111), x11);
13177 ASSERT_EQUAL_64(INT64_C(0x3333333333333333), core.dreg_bits(11));
13178
13179 VIXL_CHECK(data_32_array[0] == 0x11111111);
13180 VIXL_CHECK(data_32_array[1] == 0x11111111);
13181 VIXL_CHECK(data_32_array[2] == 0x33333333);
13182 VIXL_CHECK(data_32_array[3] == 0x33333333);
13183 VIXL_CHECK(data_32_array[4] == 0x11111111);
13184
13185 VIXL_CHECK(data_64_array[0] == INT64_C(0x1111111111111111));
13186 VIXL_CHECK(data_64_array[1] == INT64_C(0x1111111111111111));
13187 VIXL_CHECK(data_64_array[2] == INT64_C(0x3333333333333333));
13188 VIXL_CHECK(data_64_array[3] == INT64_C(0x3333333333333333));
13189 VIXL_CHECK(data_64_array[4] == INT64_C(0x1111111111111111));
13190 }
13191 }
13192
13193
13194 // Test feature detection of calls to runtime functions.
13195
13196 // C++11 should be sufficient to provide simulated runtime calls, except for a
13197 // GCC bug before 4.9.1.
13198 #if defined(VIXL_INCLUDE_SIMULATOR_AARCH64) && (__cplusplus >= 201103L) && \
13199 (defined(__clang__) || GCC_VERSION_OR_NEWER(4, 9, 1)) && \
13200 !defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT)
13201 #error \
13202 "C++11 should be sufficient to provide support for simulated runtime calls."
13203 #endif // #if defined(VIXL_INCLUDE_SIMULATOR_AARCH64) && ...
13204
13205 #if (__cplusplus >= 201103L) && \
13206 !defined(VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT)
13207 #error \
13208 "C++11 should be sufficient to provide support for `MacroAssembler::CallRuntime()`."
13209 #endif // #if (__cplusplus >= 201103L) && ...
13210
13211 #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
13212 int32_t runtime_call_add_one(int32_t a) { return a + 1; }
13213
13214 double runtime_call_add_doubles(double a, double b, double c) {
13215 return a + b + c;
13216 }
13217
13218 int64_t runtime_call_one_argument_on_stack(int64_t arg1 __attribute__((unused)),
13219 int64_t arg2 __attribute__((unused)),
13220 int64_t arg3 __attribute__((unused)),
13221 int64_t arg4 __attribute__((unused)),
13222 int64_t arg5 __attribute__((unused)),
13223 int64_t arg6 __attribute__((unused)),
13224 int64_t arg7 __attribute__((unused)),
13225 int64_t arg8 __attribute__((unused)),
13226 int64_t arg9) {
13227 return arg9;
13228 }
13229
13230 double runtime_call_two_arguments_on_stack(int64_t arg1 __attribute__((unused)),
13231 int64_t arg2 __attribute__((unused)),
13232 int64_t arg3 __attribute__((unused)),
13233 int64_t arg4 __attribute__((unused)),
13234 int64_t arg5 __attribute__((unused)),
13235 int64_t arg6 __attribute__((unused)),
13236 int64_t arg7 __attribute__((unused)),
13237 int64_t arg8 __attribute__((unused)),
13238 double arg9,
13239 double arg10) {
13240 return arg9 - arg10;
13241 }
13242
13243 void runtime_call_store_at_address(int64_t* address) { *address = 0xf00d; }
13244
13245 enum RuntimeCallTestEnum { Enum0 };
13246
13247 RuntimeCallTestEnum runtime_call_enum(RuntimeCallTestEnum e) { return e; }
13248
13249 enum class RuntimeCallTestEnumClass { Enum0 };
13250
13251 RuntimeCallTestEnumClass runtime_call_enum_class(RuntimeCallTestEnumClass e) {
13252 return e;
13253 }
13254
13255 int8_t test_int8_t(int8_t x) { return x; }
13256 uint8_t test_uint8_t(uint8_t x) { return x; }
13257 int16_t test_int16_t(int16_t x) { return x; }
13258 uint16_t test_uint16_t(uint16_t x) { return x; }
13259
13260 TEST(runtime_calls) {
13261 SETUP_WITH_FEATURES(CPUFeatures::kFP);
13262
13263 #ifndef VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT
13264 if (masm.GenerateSimulatorCode()) {
13265 // This configuration is unsupported and a `VIXL_UNREACHABLE()` would fire
13266 // while trying to generate `CallRuntime`. This configuration should only be
13267 // reachable with C++11 and a (buggy) version of GCC pre-4.9.1.
13268 return;
13269 }
13270 #endif
13271
13272 START();
13273
13274 // Test `CallRuntime`.
13275
13276 __ Mov(w0, 0);
13277 __ CallRuntime(runtime_call_add_one);
13278 __ Mov(w20, w0);
13279
13280 __ Fmov(d0, 0.0);
13281 __ Fmov(d1, 1.5);
13282 __ Fmov(d2, 2.5);
13283 __ CallRuntime(runtime_call_add_doubles);
13284 __ Fmov(d20, d0);
13285
13286 __ Mov(x0, 0x123);
13287 __ Push(x0, x0);
13288 __ CallRuntime(runtime_call_one_argument_on_stack);
13289 __ Mov(x21, x0);
13290 __ Pop(x0, x1);
13291
13292 __ Fmov(d0, 314.0);
13293 __ Fmov(d1, 4.0);
13294 __ Push(d1, d0);
13295 __ CallRuntime(runtime_call_two_arguments_on_stack);
13296 __ Fmov(d21, d0);
13297 __ Pop(d1, d0);
13298
13299 // Test that the template mechanisms don't break with enums.
13300 __ Mov(w0, 0);
13301 __ CallRuntime(runtime_call_enum);
13302 __ Mov(w0, 0);
13303 __ CallRuntime(runtime_call_enum_class);
13304
13305 // Test `TailCallRuntime`.
13306
13307 Label function, after_function;
13308 __ B(&after_function);
13309 __ Bind(&function);
13310 __ Mov(x22, 0);
13311 __ Mov(w0, 123);
13312 __ TailCallRuntime(runtime_call_add_one);
13313 // Control should not fall through.
13314 __ Mov(x22, 0xbad);
13315 __ Ret();
13316 __ Bind(&after_function);
13317
13318 // Call our placeholder function, taking care to preserve the link register.
13319 __ Push(ip0, lr);
13320 __ Bl(&function);
13321 __ Pop(lr, ip0);
13322 // Save the result.
13323 __ Mov(w23, w0);
13324
13325 __ Mov(x24, 0);
13326 int test_values[] = {static_cast<int8_t>(-1),
13327 static_cast<uint8_t>(-1),
13328 static_cast<int16_t>(-1),
13329 static_cast<uint16_t>(-1),
13330 -256,
13331 -1,
13332 0,
13333 1,
13334 256};
13335 for (size_t i = 0; i < sizeof(test_values) / sizeof(test_values[0]); ++i) {
13336 Label pass_int8, pass_uint8, pass_int16, pass_uint16;
13337 int x = test_values[i];
13338 __ Mov(w0, x);
13339 __ CallRuntime(test_int8_t);
13340 __ Sxtb(w0, w0);
13341 __ Cmp(w0, ExtractSignedBitfield32(7, 0, x));
13342 __ Cinc(x24, x24, ne);
13343 __ Mov(w0, x);
13344 __ CallRuntime(test_uint8_t);
13345 __ Uxtb(w0, w0);
13346 __ Cmp(w0, ExtractUnsignedBitfield32(7, 0, x));
13347 __ Cinc(x24, x24, ne);
13348 __ Mov(w0, x);
13349 __ CallRuntime(test_int16_t);
13350 __ Sxth(w0, w0);
13351 __ Cmp(w0, ExtractSignedBitfield32(15, 0, x));
13352 __ Cinc(x24, x24, ne);
13353 __ Mov(w0, x);
13354 __ CallRuntime(test_uint16_t);
13355 __ Uxth(w0, w0);
13356 __ Cmp(w0, ExtractUnsignedBitfield32(15, 0, x));
13357 __ Cinc(x24, x24, ne);
13358 }
13359
13360
13361 int64_t value = 0xbadbeef;
13362 __ Mov(x0, reinterpret_cast<uint64_t>(&value));
13363 __ CallRuntime(runtime_call_store_at_address);
13364
13365 END();
13366
13367 #if defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT) || \
13368 !defined(VIXL_INCLUDE_SIMULATOR_AARCH64)
13369 if (CAN_RUN()) {
13370 RUN();
13371
13372 ASSERT_EQUAL_32(1, w20);
13373 ASSERT_EQUAL_FP64(4.0, d20);
13374 ASSERT_EQUAL_64(0x123, x21);
13375 ASSERT_EQUAL_FP64(310.0, d21);
13376 VIXL_CHECK(value == 0xf00d);
13377 ASSERT_EQUAL_64(0, x22);
13378 ASSERT_EQUAL_32(124, w23);
13379 ASSERT_EQUAL_64(0, x24);
13380 }
13381 #endif // #if defined(VIXL_HAS_SIMULATED_RUNTIME_CALL_SUPPORT) || ...
13382 }
13383 #endif // #ifdef VIXL_HAS_MACROASSEMBLER_RUNTIME_CALL_SUPPORT
13384
13385
13386 TEST(optimised_mov_register) {
13387 SETUP();
13388
13389 START();
13390 Label start;
13391 __ Bind(&start);
  __ Mov(x0, x0);
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&start) == 0);
  __ Mov(w0, w0, kDiscardForSameWReg);
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&start) == 0);
  __ Mov(w0, w0);
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&start) == kInstructionSize);

  END();

  if (CAN_RUN()) {
    RUN();
  }
}


TEST(nop) {
  MacroAssembler masm;

  Label start;
  __ Bind(&start);
  __ Nop();
  // `MacroAssembler::Nop` must generate at least one nop.
  VIXL_CHECK(masm.GetSizeOfCodeGeneratedSince(&start) >= kInstructionSize);

  masm.FinalizeCode();
}

#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
// Test the pseudo-instructions that control CPUFeatures dynamically in the
// Simulator. These are used by the test infrastructure itself, but in a fairly
// limited way.

static void RunHelperWithFeatureCombinations(
    void (*helper)(const CPUFeatures& base, const CPUFeatures& f)) {
  // Iterate, testing the first n features in this list.
  CPUFeatures::Feature features[] = {
      // Put kNone first, so that the first iteration uses an empty feature set.
      CPUFeatures::kNone,
      // The remaining features used are arbitrary.
      CPUFeatures::kIDRegisterEmulation,
      CPUFeatures::kDCPoP,
      CPUFeatures::kPAuth,
      CPUFeatures::kFcma,
      CPUFeatures::kAES,
      CPUFeatures::kNEON,
      CPUFeatures::kCRC32,
      CPUFeatures::kFP,
      CPUFeatures::kPmull1Q,
      CPUFeatures::kSM4,
      CPUFeatures::kSM3,
      CPUFeatures::kDotProduct,
  };
  VIXL_ASSERT(CPUFeatures(CPUFeatures::kNone) == CPUFeatures::None());
  // The features are not necessarily encoded in kInstructionSize-sized slots,
  // so the MacroAssembler must pad the list to align the following instruction.
  // Ensure that we have enough features in the list to cover all interesting
  // alignment cases, even if the highest common factor of kInstructionSize and
  // an encoded feature is one.
  VIXL_STATIC_ASSERT(ARRAY_SIZE(features) > kInstructionSize);

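  // Exercise every pair of cumulative feature sets: `base` grows across the
  // outer loop and `f` grows across the inner loop, so each prefix of the
  // feature list is tested against every other prefix.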
  CPUFeatures base = CPUFeatures::None();
  for (size_t i = 0; i < ARRAY_SIZE(features); i++) {
    base.Combine(features[i]);
    CPUFeatures f = CPUFeatures::None();
    for (size_t j = 0; j < ARRAY_SIZE(features); j++) {
      f.Combine(features[j]);
      helper(base, f);
    }
  }
}

static void SetSimulatorCPUFeaturesHelper(const CPUFeatures& base,
                                          const CPUFeatures& f) {
  SETUP_WITH_FEATURES(base);
  START();

  __ SetSimulatorCPUFeatures(f);

  END();
  if (CAN_RUN()) {
    RUN_WITHOUT_SEEN_FEATURE_CHECK();
    VIXL_CHECK(*(simulator.GetCPUFeatures()) == f);
  }
}

TEST(configure_cpu_features_set) {
  RunHelperWithFeatureCombinations(SetSimulatorCPUFeaturesHelper);
}

static void EnableSimulatorCPUFeaturesHelper(const CPUFeatures& base,
                                             const CPUFeatures& f) {
  SETUP_WITH_FEATURES(base);
  START();

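  // Enabling is cumulative: the simulator should end up with the union of the
  // initial feature set and `f`.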
  __ EnableSimulatorCPUFeatures(f);

  END();
  if (CAN_RUN()) {
    RUN_WITHOUT_SEEN_FEATURE_CHECK();
    VIXL_CHECK(*(simulator.GetCPUFeatures()) == base.With(f));
  }
}

TEST(configure_cpu_features_enable) {
  RunHelperWithFeatureCombinations(EnableSimulatorCPUFeaturesHelper);
}

static void DisableSimulatorCPUFeaturesHelper(const CPUFeatures& base,
                                              const CPUFeatures& f) {
  SETUP_WITH_FEATURES(base);
  START();

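  // Disabling only removes `f`; anything else in the initial feature set
  // should remain enabled afterwards.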
  __ DisableSimulatorCPUFeatures(f);

  END();
  if (CAN_RUN()) {
    RUN_WITHOUT_SEEN_FEATURE_CHECK();
    VIXL_CHECK(*(simulator.GetCPUFeatures()) == base.Without(f));
  }
}

TEST(configure_cpu_features_disable) {
  RunHelperWithFeatureCombinations(DisableSimulatorCPUFeaturesHelper);
}

static void SaveRestoreSimulatorCPUFeaturesHelper(const CPUFeatures& base,
                                                  const CPUFeatures& f) {
  SETUP_WITH_FEATURES(base);
  START();

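  // Save/restore pairs should nest: after both the inner and the outer
  // restores, the simulator's features must be exactly what they were at
  // entry.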
  {
    __ SaveSimulatorCPUFeatures();
    __ SetSimulatorCPUFeatures(f);
    {
      __ SaveSimulatorCPUFeatures();
      __ SetSimulatorCPUFeatures(CPUFeatures::All());
      __ RestoreSimulatorCPUFeatures();
    }
    __ RestoreSimulatorCPUFeatures();
  }

  END();
  if (CAN_RUN()) {
    RUN_WITHOUT_SEEN_FEATURE_CHECK();
    VIXL_CHECK(*(simulator.GetCPUFeatures()) == base);
  }
}

TEST(configure_cpu_features_save_restore) {
  RunHelperWithFeatureCombinations(SaveRestoreSimulatorCPUFeaturesHelper);
}

static void SimulationCPUFeaturesScopeHelper(const CPUFeatures& base,
                                             const CPUFeatures& f) {
  SETUP_WITH_FEATURES(base);
  START();

  {
    SimulationCPUFeaturesScope scope_a(&masm, f);
    {
      SimulationCPUFeaturesScope scope_b(&masm, CPUFeatures::All());
      {
        SimulationCPUFeaturesScope scope_c(&masm, CPUFeatures::None());
        // The scope arguments should combine with 'Enable', so we should be
        // able to use any CPUFeatures here.
        __ Fadd(v0.V4S(), v1.V4S(), v2.V4S());  // Requires {FP, NEON}.
      }
    }
  }

  END();
  if (CAN_RUN()) {
    RUN_WITHOUT_SEEN_FEATURE_CHECK();
    VIXL_CHECK(*(simulator.GetCPUFeatures()) == base);
  }
}

TEST(configure_cpu_features_scope) {
  RunHelperWithFeatureCombinations(SimulationCPUFeaturesScopeHelper);
}
#endif


#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
TEST(large_sim_stack) {
  SimStack builder;
  builder.SetUsableSize(16 * 1024);  // The default is 8kB.
  SimStack::Allocated stack = builder.Allocate();
  uintptr_t base = reinterpret_cast<uintptr_t>(stack.GetBase());
  uintptr_t limit = reinterpret_cast<uintptr_t>(stack.GetLimit());
  SETUP_CUSTOM_SIM(std::move(stack));
  START();

  // Check that we can access the extremes of the stack.
  __ Mov(x0, base);
  __ Mov(x1, limit);
  __ Mov(x2, sp);
  __ Add(sp, x1, 1);  // Avoid accessing memory below `sp`.

  __ Mov(x10, 42);
  __ Poke(x10, 0);
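  // `sp` is the lowest usable byte of the stack, so this offset addresses the
  // highest usable X-register-sized slot:
  // limit + 1 + offset == base - kXRegSizeInBytes.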
  __ Peek(x10, base - limit - kXRegSizeInBytes - 1);

  __ Mov(sp, x2);

  END();
  if (CAN_RUN()) {
    RUN();
  }
}

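// The SimStack surrounds the usable stack with guard regions. The negative
// tests below check that an access one byte outside either end of the usable
// range is reported as a guard-region violation.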
#ifdef VIXL_NEGATIVE_TESTING
TEST(sim_stack_limit_guard_read) {
  SimStack builder;
  SimStack::Allocated stack = builder.Allocate();
  uintptr_t limit = reinterpret_cast<uintptr_t>(stack.GetLimit());
  SETUP_CUSTOM_SIM(std::move(stack));
  START();

  __ Mov(x1, limit);
  __ Mov(x2, sp);
  __ Add(sp, x1, 1);  // Avoid accessing memory below `sp`.

  // `sp` points to the lowest usable byte of the stack.
  __ Mov(w10, 42);
  __ Ldrb(w10, MemOperand(sp, -1));

  __ Mov(sp, x2);

  END();
  if (CAN_RUN()) {
    MUST_FAIL_WITH_MESSAGE(RUN(), "Attempt to read from stack guard region");
  }
}

TEST(sim_stack_limit_guard_write) {
  SimStack builder;
  SimStack::Allocated stack = builder.Allocate();
  uintptr_t limit = reinterpret_cast<uintptr_t>(stack.GetLimit());
  SETUP_CUSTOM_SIM(std::move(stack));
  START();

  __ Mov(x1, limit);
  __ Mov(x2, sp);
  __ Add(sp, x1, 1);  // Avoid accessing memory below `sp`.

  // `sp` points to the lowest usable byte of the stack.
  __ Mov(w10, 42);
  __ Strb(w10, MemOperand(sp, -1));

  __ Mov(sp, x2);

  END();
  if (CAN_RUN()) {
    MUST_FAIL_WITH_MESSAGE(RUN(), "Attempt to write to stack guard region");
  }
}

TEST(sim_stack_base_guard_read) {
  SimStack builder;
  SimStack::Allocated stack = builder.Allocate();
  uintptr_t base = reinterpret_cast<uintptr_t>(stack.GetBase());
  SETUP_CUSTOM_SIM(std::move(stack));
  START();

  __ Mov(x0, base);
  // `base` (x0) is the byte after the highest usable byte of the stack.
  // The last byte of this access will hit the guard region.
  __ Mov(x10, 42);
  __ Ldr(x10, MemOperand(x0, -static_cast<int64_t>(kXRegSizeInBytes) + 1));

  END();
  if (CAN_RUN()) {
    MUST_FAIL_WITH_MESSAGE(RUN(), "Attempt to read from stack guard region");
  }
}

TEST(sim_stack_base_guard_write) {
  SimStack builder;
  SimStack::Allocated stack = builder.Allocate();
  uintptr_t base = reinterpret_cast<uintptr_t>(stack.GetBase());
  SETUP_CUSTOM_SIM(std::move(stack));
  START();

  __ Mov(x0, base);
  // `base` (x0) is the byte after the highest usable byte of the stack.
  // The last byte of this access will hit the guard region.
  __ Mov(x10, 42);
  __ Str(x10, MemOperand(x0, -static_cast<int64_t>(kXRegSizeInBytes) + 1));

  END();
  if (CAN_RUN()) {
    MUST_FAIL_WITH_MESSAGE(RUN(), "Attempt to write to stack guard region");
  }
}
#endif
#endif

}  // namespace aarch64
}  // namespace vixl