; Test 64-bit logical shifts right.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

; Check the low end of the SRLG range.
define i64 @f1(i64 %a) {
; CHECK-LABEL: f1:
; CHECK: srlg %r2, %r2, 1
; CHECK: br %r14
  %shift = lshr i64 %a, 1
  ret i64 %shift
}

; Check the high end of the defined SRLG range.
define i64 @f2(i64 %a) {
; CHECK-LABEL: f2:
; CHECK: srlg %r2, %r2, 63
; CHECK: br %r14
  %shift = lshr i64 %a, 63
  ret i64 %shift
}

; We don't generate shifts by out-of-range values.
define i64 @f3(i64 %a) {
; CHECK-LABEL: f3:
; CHECK-NOT: srlg
; CHECK: br %r14
  %shift = lshr i64 %a, 64
  ret i64 %shift
}

; Check variable shifts.
define i64 @f4(i64 %a, i64 %amt) {
; CHECK-LABEL: f4:
; CHECK: srlg %r2, %r2, 0(%r3)
; CHECK: br %r14
  %shift = lshr i64 %a, %amt
  ret i64 %shift
}

; Check shift amounts that have a constant term.
define i64 @f5(i64 %a, i64 %amt) {
; CHECK-LABEL: f5:
; CHECK: srlg %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 10
  %shift = lshr i64 %a, %add
  ret i64 %shift
}

; ...and again with a sign-extended 32-bit shift amount.
define i64 @f6(i64 %a, i32 %amt) {
; CHECK-LABEL: f6:
; CHECK: srlg %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i32 %amt, 10
  %addext = sext i32 %add to i64
  %shift = lshr i64 %a, %addext
  ret i64 %shift
}

; ...and now with a zero-extended 32-bit shift amount.
define i64 @f7(i64 %a, i32 %amt) {
; CHECK-LABEL: f7:
; CHECK: srlg %r2, %r2, 10(%r3)
; CHECK: br %r14
  %add = add i32 %amt, 10
  %addext = zext i32 %add to i64
  %shift = lshr i64 %a, %addext
  ret i64 %shift
}

; Check shift amounts that have the largest in-range constant term. We could
; mask the amount instead.
define i64 @f8(i64 %a, i64 %amt) {
; CHECK-LABEL: f8:
; CHECK: srlg %r2, %r2, 524287(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 524287
  %shift = lshr i64 %a, %add
  ret i64 %shift
}

; Check the next value up, which without masking must use a separate
; addition.
define i64 @f9(i64 %a, i64 %amt) {
; CHECK-LABEL: f9:
; CHECK: a{{g?}}fi %r3, 524288
; CHECK: srlg %r2, %r2, 0(%r3)
; CHECK: br %r14
  %add = add i64 %amt, 524288
  %shift = lshr i64 %a, %add
  ret i64 %shift
}

; Check cases where 1 is subtracted from the shift amount.
define i64 @f10(i64 %a, i64 %amt) {
; CHECK-LABEL: f10:
; CHECK: srlg %r2, %r2, -1(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 1
  %shift = lshr i64 %a, %sub
  ret i64 %shift
}

; Check the lowest value that can be subtracted from the shift amount.
; Again, we could mask the shift amount instead.
define i64 @f11(i64 %a, i64 %amt) {
; CHECK-LABEL: f11:
; CHECK: srlg %r2, %r2, -524288(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 524288
  %shift = lshr i64 %a, %sub
  ret i64 %shift
}

; Check the next value down, which without masking must use a separate
; addition.
define i64 @f12(i64 %a, i64 %amt) {
; CHECK-LABEL: f12:
; CHECK: a{{g?}}fi %r3, -524289
; CHECK: srlg %r2, %r2, 0(%r3)
; CHECK: br %r14
  %sub = sub i64 %amt, 524289
  %shift = lshr i64 %a, %sub
  ret i64 %shift
}

; Check that we don't try to generate "indexed" shifts.
define i64 @f13(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: f13:
; CHECK: a{{g?}}r {{%r3, %r4|%r4, %r3}}
; CHECK: srlg %r2, %r2, 0({{%r[34]}})
; CHECK: br %r14
  %add = add i64 %b, %c
  %shift = lshr i64 %a, %add
  ret i64 %shift
}

; Check that the shift amount uses an address register. It cannot be in %r0.
define i64 @f14(i64 %a, i64 *%ptr) {
; CHECK-LABEL: f14:
; CHECK: l %r1, 4(%r3)
; CHECK: srlg %r2, %r2, 0(%r1)
; CHECK: br %r14
  %amt = load i64, i64 *%ptr
  %shift = lshr i64 %a, %amt
  ret i64 %shift
}