# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s
#
# Test producing a G_REV from an appropriate G_SHUFFLE_VECTOR.

...
---
name: rev64_mask_1_0
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $d0, $d1
    ; CHECK-LABEL: name: rev64_mask_1_0
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
    ; CHECK: $d0 = COPY [[REV64_]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 0)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0

...
---
name: rev64_mask_1_undef
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $d0, $d1
    ; CHECK-LABEL: name: rev64_mask_1_undef
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[REV64_:%[0-9]+]]:_(<2 x s32>) = G_REV64 [[COPY]]
    ; CHECK: $d0 = COPY [[REV64_]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, undef)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0

...
---
name: no_rev64_mask_1
alignment: 4
legalized: true
tracksRegLiveness: true
body: |
  bb.1.entry:
    liveins: $d0, $d1

    ; Verify that we don't produce a G_REV64 when
    ;
    ;   M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts)
    ;
    ; In this example, BlockElts = 2.
    ;
    ; At i = 1:
    ;   M[i] = 3
    ;   i % BlockElts = i % 2 = 1
    ;
    ; So
    ;
    ;   3 != (1 - 1) + (2 - 1 - 1)
    ;   3 != 0
    ;
    ; And so we should not produce a G_REV64.
    ;
    ; CHECK-LABEL: name: no_rev64_mask_1
    ; CHECK: liveins: $d0, $d1
    ; CHECK: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $d0
    ; CHECK: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $d1
    ; CHECK: [[ZIP2_:%[0-9]+]]:_(<2 x s32>) = G_ZIP2 [[COPY]], [[COPY1]]
    ; CHECK: $d0 = COPY [[ZIP2_]](<2 x s32>)
    ; CHECK: RET_ReallyLR implicit $d0
    %0:_(<2 x s32>) = COPY $d0
    %1:_(<2 x s32>) = COPY $d1
    %2:_(<2 x s32>) = G_SHUFFLE_VECTOR %0(<2 x s32>), %1, shufflemask(1, 3)
    $d0 = COPY %2(<2 x s32>)
    RET_ReallyLR implicit $d0
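
# The REV64 rule worked through in the no_rev64_mask_1 comment above requires
# every defined mask entry to reverse its lane within a block of BlockElts
# elements. The following is a rough C++ sketch of that check, kept inside a
# comment so this test remains valid MIR; it is a hypothetical helper for
# illustration only, not the lowering pass's actual implementation:
#
#   #include <cstddef>
#   #include <vector>
#
#   // Returns true if every non-negative entry of Mask satisfies
#   //   Mask[I] == (I - I % BlockElts) + (BlockElts - 1 - I % BlockElts).
#   // Negative (undef) entries are accepted optimistically.
#   static bool isBlockReverseMask(const std::vector<int> &Mask,
#                                  unsigned BlockElts) {
#     for (std::size_t I = 0; I < Mask.size(); ++I) {
#       if (Mask[I] < 0)
#         continue;
#       std::size_t Expected =
#           (I - I % BlockElts) + (BlockElts - 1 - I % BlockElts);
#       if (static_cast<std::size_t>(Mask[I]) != Expected)
#         return false;
#     }
#     return true;
#   }
#
# With BlockElts = 2, mask (1, 0) and mask (1, undef) satisfy the rule, so the
# first two tests expect a G_REV64; mask (1, 3) fails at I = 1 (3 != 0), so
# no_rev64_mask_1 expects a different lowering (G_ZIP2 here).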