; RUN: llc -march=arm64 < %s | FileCheck %s
; rdar://10232252

@object = external hidden global i64, section "__DATA, __objc_ivar", align 8

; base + offset (imm9)
; CHECK-LABEL: t1:
; CHECK: ldr xzr, [x{{[0-9]+}}, #8]
; CHECK: ret
define void @t1() {
  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 1
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + offset (> imm9)
; CHECK-LABEL: t2:
; CHECK: sub [[ADDREG:x[0-9]+]], x{{[0-9]+}}, #264
; CHECK: ldr xzr, {{\[}}[[ADDREG]]]
; CHECK: ret
define void @t2() {
  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 -33
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + unsigned offset (> imm9 and <= imm12 * size of type in bytes)
; CHECK-LABEL: t3:
; CHECK: ldr xzr, [x{{[0-9]+}}, #32760]
; CHECK: ret
define void @t3() {
  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4095
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + unsigned offset (> imm12 * size of type in bytes)
; CHECK-LABEL: t4:
; CHECK: orr w[[NUM:[0-9]+]], wzr, #0x8000
; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
; CHECK: ret
define void @t4() {
  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 4096
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + reg
; CHECK-LABEL: t5:
; CHECK: ldr xzr, [x{{[0-9]+}}, x{{[0-9]+}}, lsl #3]
; CHECK: ret
define void @t5(i64 %a) {
  %incdec.ptr = getelementptr inbounds i64, i64* @object, i64 %a
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; base + reg + imm
; CHECK-LABEL: t6:
; CHECK: add [[ADDREG:x[0-9]+]], x{{[0-9]+}}, x{{[0-9]+}}, lsl #3
; CHECK-NEXT: orr w[[NUM:[0-9]+]], wzr, #0x8000
; CHECK: ldr xzr, [x{{[0-9]+}}, x[[NUM]]]
; CHECK: ret
define void @t6(i64 %a) {
  %tmp1 = getelementptr inbounds i64, i64* @object, i64 %a
  %incdec.ptr = getelementptr inbounds i64, i64* %tmp1, i64 4096
  %tmp = load volatile i64, i64* %incdec.ptr, align 8
  ret void
}

; Test base + wide immediate
define void @t7(i64 %a) {
; CHECK-LABEL: t7:
; CHECK: orr w[[NUM:[0-9]+]], wzr, #0xffff
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
  %1 = add i64 %a, 65535 ;0xffff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t8(i64 %a) {
; CHECK-LABEL: t8:
; CHECK: mov [[REG:x[0-9]+]], #-4662
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
  %1 = sub i64 %a, 4662 ;-4662 is 0xffffffffffffedca
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t9(i64 %a) {
; CHECK-LABEL: t9:
; CHECK: mov [[REG:x[0-9]+]], #-305463297
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
  %1 = add i64 -305463297, %a ;-305463297 is 0xffffffffedcaffff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t10(i64 %a) {
; CHECK-LABEL: t10:
; CHECK: mov [[REG:x[0-9]+]], #81909218222800896
; CHECK-NEXT: ldr xzr, [x0, [[REG]]]
  %1 = add i64 %a, 81909218222800896 ;0x123000000000000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t11(i64 %a) {
; CHECK-LABEL: t11:
; CHECK: mov w[[NUM:[0-9]+]], #19070976
; CHECK: movk w[[NUM]], #17767
; CHECK-NEXT: ldr xzr, [x0, x[[NUM]]]
  %1 = add i64 %a, 19088743 ;0x1234567
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}
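
; For reference (a summary sketch, not checked by FileCheck): for an 8-byte
; access, the load-offset ranges exercised above are:
;   * unscaled signed imm9 (ldur):  -256..255
;   * scaled unsigned imm12 (ldr):  0..4095*8 = 0..32760
;   * anything else: the offset must be materialized in a register.
; e.g. t3's index 4095 is byte offset 4095*8 = 32760, the largest scaled
; offset, while t4's 4096*8 = 32768 (0x8000) no longer fits and is built
; with the orr checked above.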

; Test some boundaries that should not use movz/movn/orr
define void @t12(i64 %a) {
; CHECK-LABEL: t12:
; CHECK: add [[REG:x[0-9]+]], x0, #4095
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, 4095 ;0xfff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t13(i64 %a) {
; CHECK-LABEL: t13:
; CHECK: sub [[REG:x[0-9]+]], x0, #4095
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, -4095 ;-0xfff
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t14(i64 %a) {
; CHECK-LABEL: t14:
; CHECK: add [[REG:x[0-9]+]], x0, #291, lsl #12
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, 1191936 ;0x123000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t15(i64 %a) {
; CHECK-LABEL: t15:
; CHECK: sub [[REG:x[0-9]+]], x0, #291, lsl #12
; CHECK-NEXT: ldr xzr, {{\[}}[[REG]]]
  %1 = add i64 %a, -1191936 ;0xFFFFFFFFFFEDD000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t16(i64 %a) {
; CHECK-LABEL: t16:
; CHECK: ldr xzr, [x0, #28672]
  %1 = add i64 %a, 28672 ;0x7000
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}

define void @t17(i64 %a) {
; CHECK-LABEL: t17:
; CHECK: ldur xzr, [x0, #-256]
  %1 = add i64 %a, -256 ;-0x100
  %2 = inttoptr i64 %1 to i64*
  %3 = load volatile i64, i64* %2, align 8
  ret void
}
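
; For reference (a summary sketch, not checked by FileCheck): AArch64 add/sub
; immediates are a 12-bit value optionally shifted left by 12, so +/-4095
; (t12/t13) and +/-(291<<12) = +/-1191936 (t14/t15) each fold into a single
; add/sub; 28672 = 3584*8 (t16) folds into the scaled load itself, and -256
; (t17) is the lower bound of the unscaled imm9 form, hence ldur.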