/external/llvm-project/llvm/test/CodeGen/AMDGPU/GlobalISel/ |
D | regbankselect-dyn-stackalloc.mir |
      22  ; WAVE64: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[C]](s32)
      24  ; WAVE64: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[SHL]](s32)
      29  ; WAVE32: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[C]](s32)
      31  ; WAVE32: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[SHL]](s32)
      52  ; WAVE64: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[C]](s32)
      54  ; WAVE64: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[SHL]](s32)
      59  ; WAVE32: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[C]](s32)
      61  ; WAVE32: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[SHL]](s32)
      82  ; WAVE64: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[C]](s32)
      84  ; WAVE64: [[PTR_ADD:%[0-9]+]]:sgpr(p5) = G_PTR_ADD [[COPY1]], [[SHL]](s32)
      [all …]
|
D | inst-select-shl.v2s16.mir |
      23  ; GFX6: [[SHL:%[0-9]+]]:sgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY1]](<2 x s16>)
      24  ; GFX6: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
      28  ; GFX7: [[SHL:%[0-9]+]]:sgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY1]](<2 x s16>)
      29  ; GFX7: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
      33  ; GFX8: [[SHL:%[0-9]+]]:sgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY1]](<2 x s16>)
      34  ; GFX8: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
      38  ; GFX9: [[SHL:%[0-9]+]]:sgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY1]](<2 x s16>)
      39  ; GFX9: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
      43  ; GFX10: [[SHL:%[0-9]+]]:sgpr(<2 x s16>) = G_SHL [[COPY]], [[COPY1]](<2 x s16>)
      44  ; GFX10: S_ENDPGM 0, implicit [[SHL]](<2 x s16>)
      [all …]
|
D | regbankselect-shl.mir |
      15  ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY]], [[COPY1]](s32)
      16  ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
      34  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY2]], [[COPY1]](s32)
      35  ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
      53  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY2]](s32)
      54  ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
      71  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY]], [[COPY1]](s32)
      72  ; CHECK: S_ENDPGM 0, implicit [[SHL]](s32)
      93  ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[ANYEXT]], [[ZEXT]](s32)
      94  ; CHECK: [[TRUNC2:%[0-9]+]]:sgpr(s16) = G_TRUNC [[SHL]](s32)
      [all …]
|
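Note on the regbankselect-shl.mir matches: line 15 covers the all-SGPR case, while lines 34 and 53 show a [[COPY2]] inserted to move one operand into a VGPR when the banks are mixed. A minimal hypothetical input of that mixed-bank shape (not the file's actual contents; names invented):

    name:            shl_s32_sv
    legalized:       true
    body:            |
      bb.0:
        liveins: $sgpr0, $vgpr0
        ; one SGPR and one VGPR operand force regbankselect to copy
        ; the scalar side into a vgpr before assigning the G_SHL a bank
        %0:_(s32) = COPY $sgpr0
        %1:_(s32) = COPY $vgpr0
        %2:_(s32) = G_SHL %0, %1(s32)
        S_ENDPGM 0, implicit %2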
D | inst-select-shl.s16.mir |
      35  ; GFX8: [[SHL:%[0-9]+]]:sgpr(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
      36  ; GFX8: S_ENDPGM 0, implicit [[SHL]](s16)
      42  ; GFX9: [[SHL:%[0-9]+]]:sgpr(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
      43  ; GFX9: S_ENDPGM 0, implicit [[SHL]](s16)
      49  ; GFX10: [[SHL:%[0-9]+]]:sgpr(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
      50  ; GFX10: S_ENDPGM 0, implicit [[SHL]](s16)
      103  ; GFX8: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[TRUNC]], [[COPY1]](s32)
      104  ; GFX8: S_ENDPGM 0, implicit [[SHL]](s16)
      109  ; GFX9: [[SHL:%[0-9]+]]:vgpr(s16) = G_SHL [[TRUNC]], [[COPY1]](s32)
      110  ; GFX9: S_ENDPGM 0, implicit [[SHL]](s16)
      [all …]
|
D | combine-shl-from-extend-narrow.postlegal.mir |
      19  ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C]](s32)
      20  ; GFX6: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      26  ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ANYEXT]], [[C]](s32)
      27  ; GFX9: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      49  ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C]](s32)
      50  ; GFX6: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      56  ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[SEXT]], [[C]](s32)
      57  ; GFX9: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      79  ; GFX6: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[ZEXT]], [[C]](s32)
      80  ; GFX6: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      [all …]
|
D | combine-shl-narrow.mir |
      57  ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
      59  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C1]](s32), [[SHL]](s32)
      78  ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s32)
      79  ; CHECK: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      98  ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
      100  ; CHECK: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[C1]](s32), [[SHL]](s32)
      119  ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s32)
      120  ; CHECK: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      138  ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s32)
      139  ; CHECK: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      [all …]
|
D | legalize-shl.mir |
      15  ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
      16  ; SI: $vgpr0 = COPY [[SHL]](s32)
      20  ; VI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
      21  ; VI: $vgpr0 = COPY [[SHL]](s32)
      25  ; GFX9: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]](s32)
      26  ; GFX9: $vgpr0 = COPY [[SHL]](s32)
      42  ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32)
      43  ; SI: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      48  ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s32)
      49  ; VI: $vgpr0_vgpr1 = COPY [[SHL]](s64)
      [all …]
|
D | legalize-ushlsat.mir |
      20  ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
      21  ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32)
      24  ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[LSHR]]
      37  ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
      38  ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16)
      41  ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]]
      54  ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
      55  ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16)
      58  ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[LSHR]]
      86  ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
      [all …]
|
D | combine-trunc-shl.mir |
      17  ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
      18  ; CHECK: $vgpr0 = COPY [[SHL]](s32)
      38  ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
      39  ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
      61  ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[C]](s32)
      62  ; CHECK: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s64)
|
D | legalize-sshlsat.mir |
      20  ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C1]](s32)
      21  ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[SHL]], [[AND]](s32)
      26  ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s32), [[C4]]
      28  ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s32), [[ASHR]]
      41  ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
      42  ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16)
      47  ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[SHL]](s16), [[C4]]
      49  ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[SHL]](s16), [[ASHR]]
      62  ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C1]](s16)
      63  ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[SHL]], [[AND]](s16)
      [all …]
|
D | regbankselect-build-vector-trunc.v2s16.mir |
      35  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY1]], [[C1]](s32)
      37  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[AND]], [[SHL]]
      57  ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY1]], [[C1]](s32)
      59  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[AND]], [[SHL]]
      79  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY1]], [[C1]](s32)
      81  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[AND]], [[SHL]]
|
D | regbankselect-build-vector-trunc.mir |
      34  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY1]], [[C1]](s32)
      36  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[AND]], [[SHL]]
      55  ; CHECK: [[SHL:%[0-9]+]]:sgpr(s32) = G_SHL [[COPY1]], [[C1]](s32)
      57  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[AND]], [[SHL]]
      76  ; CHECK: [[SHL:%[0-9]+]]:vgpr(s32) = G_SHL [[COPY1]], [[C1]](s32)
      78  ; CHECK: [[OR:%[0-9]+]]:vgpr(s32) = G_OR [[AND]], [[SHL]]
|
/external/llvm-project/llvm/test/Transforms/CorrelatedValuePropagation/ |
D | shl.ll |
      6  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[A:%.*]], [[B:%.*]]
      7  ; CHECK-NEXT: ret i8 [[SHL]]
      19  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[A:%.*]], [[B]]
      20  ; CHECK-NEXT: ret i8 [[SHL]]
      42  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[A:%.*]], [[B]]
      43  ; CHECK-NEXT: ret i8 [[SHL]]
      65  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[A:%.*]], [[B]]
      66  ; CHECK-NEXT: ret i8 [[SHL]]
      88  ; CHECK-NEXT: [[SHL:%.*]] = shl nuw nsw i8 [[A:%.*]], [[B]]
      89  ; CHECK-NEXT: ret i8 [[SHL]]
      [all …]
|
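Note on the CorrelatedValuePropagation shl.ll matches: most checks show the shl passing through unchanged; the line-88 match is the positive case, where CVP has attached nuw nsw flags. The tests share a common shape, a dominating range check followed by the shift whose flags are inspected. A reduced hypothetical example of that shape (bounds and names invented):

    define i8 @shl_guarded(i8 %a, i8 %b) {
    entry:
      %cmp = icmp ult i8 %b, 4          ; bounds the shift amount
      br i1 %cmp, label %bb, label %exit
    bb:
      %shl = shl i8 %a, %b              ; CVP decides here whether nuw/nsw hold
      ret i8 %shl
    exit:
      ret i8 0
    }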
/external/llvm-project/llvm/test/Transforms/InstCombine/ |
D | signbit-shl-and-icmpeq-zero.ll |
      11  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 -128, [[Y:%.*]]
      12  ; CHECK-NEXT: [[AND:%.*]] = and i8 [[SHL]], [[X:%.*]]
      24  ; CHECK-NEXT: [[SHL:%.*]] = shl i16 -32768, [[Y:%.*]]
      25  ; CHECK-NEXT: [[AND:%.*]] = and i16 [[SHL]], [[X:%.*]]
      37  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
      38  ; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
      50  ; CHECK-NEXT: [[SHL:%.*]] = shl i64 -9223372036854775808, [[Y:%.*]]
      51  ; CHECK-NEXT: [[AND:%.*]] = and i64 [[SHL]], [[X:%.*]]
      63  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 -2147483648, [[Y:%.*]]
      64  ; CHECK-NEXT: [[AND:%.*]] = and i32 [[SHL]], [[X:%.*]]
      [all …]
|
D | shl-and-negC-icmpeq-zero.ll |
      12  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], [[Y:%.*]]
      13  ; CHECK-NEXT: [[R:%.*]] = icmp ult i8 [[SHL]], 4
      24  ; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[X:%.*]], [[Y:%.*]]
      25  ; CHECK-NEXT: [[R:%.*]] = icmp ult i16 [[SHL]], 128
      36  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
      37  ; CHECK-NEXT: [[R:%.*]] = icmp ult i32 [[SHL]], 262144
      48  ; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[X:%.*]], [[Y:%.*]]
      49  ; CHECK-NEXT: [[R:%.*]] = icmp ult i64 [[SHL]], 8589934592
      60  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
      61  ; CHECK-NEXT: [[R:%.*]] = icmp ugt i32 [[SHL]], 262143
      [all …]
|
D | shl-and-signbit-icmpeq-zero.ll |
      12  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], [[Y:%.*]]
      13  ; CHECK-NEXT: [[R:%.*]] = icmp sgt i8 [[SHL]], -1
      24  ; CHECK-NEXT: [[SHL:%.*]] = shl i16 [[X:%.*]], [[Y:%.*]]
      25  ; CHECK-NEXT: [[R:%.*]] = icmp sgt i16 [[SHL]], -1
      36  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
      37  ; CHECK-NEXT: [[R:%.*]] = icmp sgt i32 [[SHL]], -1
      48  ; CHECK-NEXT: [[SHL:%.*]] = shl i64 [[X:%.*]], [[Y:%.*]]
      49  ; CHECK-NEXT: [[R:%.*]] = icmp sgt i64 [[SHL]], -1
      60  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], [[Y:%.*]]
      61  ; CHECK-NEXT: [[R:%.*]] = icmp slt i32 [[SHL]], 0
      [all …]
|
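Note on the shl-and-signbit-icmpeq-zero.ll matches: the CHECK output keeps the shl and turns the masked equality test into a plain sign test (icmp sgt ..., -1 for the eq form; icmp slt ..., 0 for the ne form at line 61). A hypothetical i32 instance of the input pattern (function name invented):

    define i1 @scalar_i32_eq(i32 %x, i32 %y) {
      %shl = shl i32 %x, %y
      %and = and i32 %shl, -2147483648   ; isolate the sign bit
      %r = icmp eq i32 %and, 0
      ret i1 %r
    }
    ; per the matches at lines 36-37, InstCombine is expected to rewrite
    ; the compare to:  %r = icmp sgt i32 %shl, -1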
D | pr27343.ll |
      8  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[DOTCAST]], 1
      9  ; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[SHL]], -16777216
      26  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[X:%.*]], 7
      27  ; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[SHL]], 4608
|
D | shl-unsigned-cmp-const.ll |
      130  ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], <i32 11, i32 11, i32 undef, i32 11>
      131  ; CHECK-NEXT: [[CMP:%.*]] = icmp ult <4 x i32> [[SHL]], <i32 131072, i32 131072, i32 131072, i32…
      141  ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], <i32 11, i32 11, i32 11, i32 11>
      142  ; CHECK-NEXT: [[CMP:%.*]] = icmp ult <4 x i32> [[SHL]], <i32 131072, i32 131072, i32 131072, i32…
      152  ; CHECK-NEXT: [[SHL:%.*]] = shl <4 x i32> [[X:%.*]], <i32 11, i32 11, i32 undef, i32 11>
      153  ; CHECK-NEXT: [[CMP:%.*]] = icmp ult <4 x i32> [[SHL]], <i32 undef, i32 131072, i32 131072, i32 …
      202  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], 5
      203  ; CHECK-NEXT: store i8 [[SHL]], i8* [[P:%.*]], align 1
      204  ; CHECK-NEXT: [[CMP:%.*]] = icmp ult i8 [[SHL]], 64
      218  ; CHECK-NEXT: [[SHL:%.*]] = shl i8 [[X:%.*]], 5
      [all …]
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Lanai/ |
D | LanaiAluCode.h |
      35  SHL = 0x17, enumerator
      92  case SHL: in lanaiAluCodeToString()
      112  .Case("sh", SHL) in stringToLanaiAluCode()
      134  case ISD::SHL: in isdToLanaiAluCode()
      135  return AluCode::SHL; in isdToLanaiAluCode()
|
/external/llvm-project/llvm/lib/Target/Lanai/ |
D | LanaiAluCode.h |
      35  SHL = 0x17, enumerator
      92  case SHL: in lanaiAluCodeToString()
      112  .Case("sh", SHL) in stringToLanaiAluCode()
      134  case ISD::SHL: in isdToLanaiAluCode()
      135  return AluCode::SHL; in isdToLanaiAluCode()
|
/external/llvm/lib/Target/Lanai/ |
D | LanaiAluCode.h |
      36  SHL = 0x17, enumerator
      93  case SHL: in lanaiAluCodeToString()
      113  .Case("sh", SHL) in stringToLanaiAluCode()
      135  case ISD::SHL: in isdToLanaiAluCode()
      136  return AluCode::SHL; in isdToLanaiAluCode()
|
/external/llvm-project/llvm/test/Transforms/AggressiveInstCombine/ |
D | funnel.ll |
      12  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[C]]
      13  ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
      43  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[C]]
      44  ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
      74  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[C]]
      75  ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHL]], [[SHR]]
      107  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[C]]
      108  ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
      141  ; CHECK-NEXT: [[SHL:%.*]] = shl i32 [[A:%.*]], [[SUB]]
      143  ; CHECK-NEXT: [[OR:%.*]] = or i32 [[SHR]], [[SHL]]
      [all …]
|
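Note on the AggressiveInstCombine funnel.ll matches: the shl/lshr/or triple is the funnel-shift idiom, guarded against a zero shift amount because the complementary lshr would otherwise shift by the full bit width. A hypothetical reduction of the guarded pattern (whether a given variant folds to @llvm.fshl.i32 is exactly what the tests pin down; several of the matches above show forms left unfolded):

    define i32 @fshl_shape(i32 %a, i32 %b, i32 %c) {
    entry:
      %cmp = icmp eq i32 %c, 0
      br i1 %cmp, label %end, label %fsh
    fsh:
      %sub = sub i32 32, %c            ; %sub would be 32 at %c == 0, hence the guard
      %shr = lshr i32 %b, %sub
      %shl = shl i32 %a, %c
      %or = or i32 %shr, %shl
      br label %end
    end:
      %r = phi i32 [ %a, %entry ], [ %or, %fsh ]
      ret i32 %r
    }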
/external/llvm-project/clang/test/CodeGenObjC/ |
D | ubsan-bool.m |
      23  // OBJC: [[SHL:%.*]] = shl i8 [[LOAD]], 7
      24  // OBJC: [[ASHR:%.*]] = ashr i8 [[SHL]], 7
      49  // OBJC: [[SHL:%.*]] = shl i8 [[LOAD]], 7
      50  // OBJC: [[ASHR:%.*]] = ashr i8 [[SHL]], 7
      58  // OBJC: [[SHL:%.*]] = shl i8 [[LOAD]], 7
      59  // OBJC: [[ASHR:%.*]] = ashr i8 [[SHL]], 7
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | bfe-patterns.ll |
      27  ; SI-NEXT: v_lshl_b32_e32 [[SHL:v[0-9]+]], [[SRC]], [[SUB]]
      28  ; SI-NEXT: v_lshr_b32_e32 [[BFE:v[0-9]+]], [[SHL]], [[SUB]]
      30  ; VI-NEXT: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], [[SUB]], [[SRC]]
      31  ; VI-NEXT: v_lshrrev_b32_e32 [[BFE:v[0-9]+]], [[SUB]], [[SHL]]
      34  ; GCN: [[SHL]]
      68  ; GCN: s_lshl_b32 [[SHL:s[0-9]+]], s[[SRC]], [[SUB]]
      69  ; GCN: s_lshr_b32 s{{[0-9]+}}, [[SHL]], [[SUB]]
      104  ; SI-NEXT: v_lshl_b32_e32 [[SHL:v[0-9]+]], [[SRC]], [[SUB]]
      105  ; SI-NEXT: v_ashr_i32_e32 [[BFE:v[0-9]+]], [[SHL]], [[SUB]]
      107  ; VI-NEXT: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], [[SUB]], [[SRC]]
      [all …]
|
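Note on the bfe-patterns.ll matches: the paired shifts implement the classic bit-field-extract idiom, shifting left then right by the same computed amount (lshr for the unsigned form at lines 27-28, ashr for the signed form at lines 104-105). In IR terms, roughly (hypothetical reduction; the test itself checks the resulting AMDGPU machine code):

    define i32 @bfe_u32_shape(i32 %src, i32 %width) {
      %sub = sub i32 32, %width
      %shl = shl i32 %src, %sub
      %bfe = lshr i32 %shl, %sub       ; zero-extends the low %width bits
      ret i32 %bfe
    }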
/external/llvm/test/CodeGen/AMDGPU/ |
D | shift-i64-opts.ll |
      126  ; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 31, [[VAL]]
      127  ; GCN: buffer_store_dword [[SHL]]
      138  ; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 15, [[VAL]]
      139  ; GCN: buffer_store_short [[SHL]]
      150  ; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 15, [[VAL]]
      151  ; GCN: buffer_store_short [[SHL]]
      162  ; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 7, [[VAL]]
      163  ; GCN: buffer_store_byte [[SHL]]
      174  ; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 1, [[VAL]]
      175  ; GCN: v_and_b32_e32 [[AND:v[0-9]+]], 2, [[SHL]]
      [all …]
|