; RUN: llc < %s -mtriple=armv7-apple-ios -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-IOS --check-prefix=CHECK
; RUN: llc < %s -mtriple=thumbv7m-none-macho -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-DARWIN --check-prefix=CHECK
; RUN: llc < %s -mtriple=arm-none-eabi -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-EABI --check-prefix=CHECK
; RUN: llc < %s -mtriple=arm-none-eabihf -disable-post-ra -o - | FileCheck %s --check-prefix=CHECK-EABI --check-prefix=CHECK

define void @f1(i8* %dest, i8* %src) {
entry:
  ; CHECK-LABEL: f1

  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 0, i1 false)

  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %src, i32 500, i32 0, i1 false)

  ; EABI memset swaps arguments
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  call void @llvm.memset.p0i8.i32(i8* %dest, i8 0, i32 500, i32 0, i1 false)
  unreachable
}

; Check that alloca arguments to memory intrinsics are automatically aligned if at least 8 bytes in size
define void @f2(i8* %dest, i32 %n) {
entry:
  ; CHECK-LABEL: f2

  ; IOS (ARMv7) should 8-byte align, others should 4-byte align
  ; CHECK-IOS: add r1, sp, #32
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: add r1, sp, #28
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: add r1, sp, #28
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [9 x i8], align 1
  %0 = bitcast [9 x i8]* %arr0 to i8*
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: add r1, sp, #16
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [9 x i8], align 1
  %1 = bitcast [9 x i8]* %arr1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK-IOS: mov r0, sp
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: add r0, sp, #4
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: add r0, sp, #4
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [9 x i8], align 1
  %2 = bitcast [9 x i8]* %arr2 to i8*
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that alloca arguments are not aligned if less than 8 bytes in size
define void @f3(i8* %dest, i32 %n) {
entry:
  ; CHECK-LABEL: f3

  ; CHECK: {{add(.w)? r1, sp, #17|sub(.w)? r1, r7, #15}}
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [7 x i8], align 1
  %0 = bitcast [7 x i8]* %arr0 to i8*
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r1, sp, #10}}
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [7 x i8], align 1
  %1 = bitcast [7 x i8]* %arr1 to i8*
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r0, sp, #3}}
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [7 x i8], align 1
  %2 = bitcast [7 x i8]* %arr2 to i8*
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that alloca arguments are not aligned if size+offset is less than 8 bytes
define void @f4(i8* %dest, i32 %n) {
entry:
  ; CHECK-LABEL: f4

  ; CHECK: {{add(.w)? r., sp, #23|sub(.w)? r., r7, #17}}
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [9 x i8], align 1
  %0 = getelementptr inbounds [9 x i8], [9 x i8]* %arr0, i32 0, i32 4
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(10|14)}}
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [9 x i8], align 1
  %1 = getelementptr inbounds [9 x i8], [9 x i8]* %arr1, i32 0, i32 4
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(1|5)}}
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [9 x i8], align 1
  %2 = getelementptr inbounds [9 x i8], [9 x i8]* %arr2, i32 0, i32 4
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that alloca arguments are not aligned if the offset is not a multiple of 4
define void @f5(i8* %dest, i32 %n) {
entry:
  ; CHECK-LABEL: f5

  ; CHECK: {{add(.w)? r., sp, #27|sub(.w)? r., r7, #21}}
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [13 x i8], align 1
  %0 = getelementptr inbounds [13 x i8], [13 x i8]* %arr0, i32 0, i32 1
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(10|14)}}
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [13 x i8], align 1
  %1 = getelementptr inbounds [13 x i8], [13 x i8]* %arr1, i32 0, i32 1
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(1|5)}}
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [13 x i8], align 1
  %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 1
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that alloca arguments are not aligned if the offset is unknown
define void @f6(i8* %dest, i32 %n, i32 %i) {
entry:
  ; CHECK-LABEL: f6

  ; CHECK: {{add(.w)? r., sp, #27|sub(.w)? r., r7, #25}}
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [13 x i8], align 1
  %0 = getelementptr inbounds [13 x i8], [13 x i8]* %arr0, i32 0, i32 %i
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(10|14)}}
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [13 x i8], align 1
  %1 = getelementptr inbounds [13 x i8], [13 x i8]* %arr1, i32 0, i32 %i
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(1|5)}}
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [13 x i8], align 1
  %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 %i
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that alloca arguments are not aligned if the GEP is not inbounds
define void @f7(i8* %dest, i32 %n) {
entry:
  ; CHECK-LABEL: f7

  ; CHECK: {{add(.w)? r., sp, #27|sub(.w)? r., r7, #21}}
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [13 x i8], align 1
  %0 = getelementptr [13 x i8], [13 x i8]* %arr0, i32 0, i32 4
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(10|14)}}
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [13 x i8], align 1
  %1 = getelementptr [13 x i8], [13 x i8]* %arr1, i32 0, i32 4
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(1|5)}}
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [13 x i8], align 1
  %2 = getelementptr [13 x i8], [13 x i8]* %arr2, i32 0, i32 4
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that alloca arguments are not aligned when the offset is past the end of the allocation
define void @f8(i8* %dest, i32 %n) {
entry:
  ; CHECK-LABEL: f8

  ; CHECK: {{add(.w)? r., sp, #27|sub(.w)? r., r7, #21}}
  ; CHECK-IOS: memmove
  ; CHECK-DARWIN: memmove
  ; CHECK-EABI: __aeabi_memmove
  %arr0 = alloca [13 x i8], align 1
  %0 = getelementptr inbounds [13 x i8], [13 x i8]* %arr0, i32 0, i32 16
  call void @llvm.memmove.p0i8.p0i8.i32(i8* %dest, i8* %0, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(10|14)}}
  ; CHECK-IOS: memcpy
  ; CHECK-DARWIN: memcpy
  ; CHECK-EABI: __aeabi_memcpy
  %arr1 = alloca [13 x i8], align 1
  %1 = getelementptr inbounds [13 x i8], [13 x i8]* %arr1, i32 0, i32 16
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* %1, i32 %n, i32 0, i1 false)

  ; CHECK: {{add(.w)? r., sp, #(1|5)}}
  ; CHECK-IOS: mov r1, #0
  ; CHECK-IOS: memset
  ; CHECK-DARWIN: movs r1, #0
  ; CHECK-DARWIN: memset
  ; CHECK-EABI: mov r2, #0
  ; CHECK-EABI: __aeabi_memset
  %arr2 = alloca [13 x i8], align 1
  %2 = getelementptr inbounds [13 x i8], [13 x i8]* %arr2, i32 0, i32 16
  call void @llvm.memset.p0i8.i32(i8* %2, i8 0, i32 %n, i32 0, i1 false)

  unreachable
}

; Check that global variables are aligned if they are large enough, but only if
; they are defined in this object and don't have an explicit section.
@arr1 = global [7 x i8] c"\01\02\03\04\05\06\07", align 1
@arr2 = global [8 x i8] c"\01\02\03\04\05\06\07\08", align 1
@arr3 = global [7 x i8] c"\01\02\03\04\05\06\07", section "foo,bar", align 1
@arr4 = global [8 x i8] c"\01\02\03\04\05\06\07\08", section "foo,bar", align 1
@arr5 = weak global [7 x i8] c"\01\02\03\04\05\06\07", align 1
@arr6 = weak_odr global [7 x i8] c"\01\02\03\04\05\06\07", align 1
@arr7 = external global [7 x i8], align 1
define void @f9(i8* %dest, i32 %n) {
entry:
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr1, i32 0, i32 0), i32 %n, i32 1, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr2, i32 0, i32 0), i32 %n, i32 1, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr3, i32 0, i32 0), i32 %n, i32 1, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([8 x i8], [8 x i8]* @arr4, i32 0, i32 0), i32 %n, i32 1, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr5, i32 0, i32 0), i32 %n, i32 1, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr6, i32 0, i32 0), i32 %n, i32 1, i1 false)
  call void @llvm.memcpy.p0i8.p0i8.i32(i8* %dest, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @arr7, i32 0, i32 0), i32 %n, i32 1, i1 false)

  unreachable
}

; CHECK: {{\.data|\.section.+data}}
; CHECK-NOT: .align
; CHECK: arr1:
; CHECK-IOS: .align 3
; CHECK-DARWIN: .align 2
; CHECK-EABI: .align 2
; CHECK: arr2:
; CHECK: {{\.section.+foo,bar}}
; CHECK-NOT: .align
; CHECK: arr3:
; CHECK-NOT: .align
; CHECK: arr4:
; CHECK: {{\.data|\.section.+data}}
; CHECK-NOT: .align
; CHECK: arr5:
; CHECK-NOT: .align
; CHECK: arr6:
; CHECK-NOT: arr7:

declare void @llvm.memmove.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
declare void @llvm.memcpy.p0i8.p0i8.i32(i8* nocapture, i8* nocapture, i32, i32, i1) nounwind
declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i32, i1) nounwind