; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
; RUN: llc < %s -O0 -mattr=+strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
; RUN: llc < %s -O0 -mattr=+strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN

; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
; RUN: llc < %s -O0 -mattr=+strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
; RUN: llc < %s -O0 -mattr=+strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-linux-gnueabi -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN

; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-nacl -verify-machineinstrs | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -mattr=+strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-nacl -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN

; RUN: llc < %s -O0 -mattr=+strict-align -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-unknown-unknown -mattr=+strict-align -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN
; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort=1 -relocation-model=dynamic-no-pic -mtriple=thumbv7-unknown-unknown -verify-machineinstrs | FileCheck %s --check-prefix=THUMB
; RUN: llc < %s -O0 -relocation-model=dynamic-no-pic -mtriple=armv7-unknown-unknown -mattr=+strict-align -verify-machineinstrs | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
; RUN: llc < %s -O0 -relocation-model=dynamic-no-pic -mtriple=thumbv7-unknown-unknown -mattr=+strict-align -verify-machineinstrs | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN

; Check unaligned stores
%struct.anon = type <{ float }>

@a = common global %struct.anon* null, align 4

define void @unaligned_store(float %x, float %y) nounwind {
entry:
; ARM: @unaligned_store
; ARM: vmov r1, s0
; ARM: str r1, [r0]

; THUMB: @unaligned_store
; THUMB: vmov r1, s0
; THUMB: str r1, [r0]

  %add = fadd float %x, %y
  %0 = load %struct.anon*, %struct.anon** @a, align 4
  %x1 = getelementptr inbounds %struct.anon, %struct.anon* %0, i32 0, i32 0
  store float %add, float* %x1, align 1
  ret void
}

; Doublewords require only word-alignment.
; rdar://10528060
%struct.anon.0 = type { double }

@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4

define void @word_aligned_f64_store(double %a, double %b) nounwind {
entry:
; ARM: @word_aligned_f64_store
; THUMB: @word_aligned_f64_store
  %add = fadd double %a, %b
  store double %add, double* getelementptr inbounds (%struct.anon.0, %struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
; ARM: vstr d16, [r0]
; THUMB: vstr d16, [r0]
  ret void
}

; Check unaligned loads of floats
%class.TAlignTest = type <{ i16, float }>

define zeroext i1 @unaligned_f32_load(%class.TAlignTest* %this) nounwind align 2 {
entry:
; ARM: @unaligned_f32_load
; THUMB: @unaligned_f32_load
  %0 = alloca %class.TAlignTest*, align 4
  store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
  %1 = load %class.TAlignTest*, %class.TAlignTest** %0
  %2 = getelementptr inbounds %class.TAlignTest, %class.TAlignTest* %1, i32 0, i32 1
  %3 = load float, float* %2, align 1
  %4 = fcmp une float %3, 0.000000e+00
; ARM: ldr r[[R:[0-9]+]], [r0, #2]
; ARM: vmov s0, r[[R]]
; ARM: vcmpe.f32 s0, #0
; THUMB: ldr.w r[[R:[0-9]+]], [r0, #2]
; THUMB: vmov s0, r[[R]]
; THUMB: vcmpe.f32 s0, #0
  ret i1 %4
}

define void @unaligned_i16_store(i16 %x, i16* %y) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i16_store
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb

; THUMB-STRICT-ALIGN: @unaligned_i16_store
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb

  store i16 %x, i16* %y, align 1
  ret void
}

define i16 @unaligned_i16_load(i16* %x) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i16_load
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb

; THUMB-STRICT-ALIGN: @unaligned_i16_load
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb

  %0 = load i16, i16* %x, align 1
  ret i16 %0
}

define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i32_store
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb

; THUMB-STRICT-ALIGN: @unaligned_i32_store
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb

  store i32 %x, i32* %y, align 1
  ret void
}

define i32 @unaligned_i32_load(i32* %x) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i32_load
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb

; THUMB-STRICT-ALIGN: @unaligned_i32_load
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb

  %0 = load i32, i32* %x, align 1
  ret i32 %0
}
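
; A minimal C-level sketch of the kind of source that yields the align-1
; float access checked in @unaligned_f32_load above: a packed struct whose
; float member sits at a 2-byte offset. This is only an illustrative
; assumption (field and parameter names are invented); the original source
; of this test is not recorded here.
;
;   struct __attribute__((packed)) TAlignTest {
;     unsigned short tag; /* offset 0..1                    */
;     float val;          /* offset 2: not 4-byte aligned   */
;   };
;
;   _Bool unaligned_f32_load(struct TAlignTest *p) {
;     /* Clang would typically lower this member access to a load with
;        'align 1', as in the 'load float, float* %2, align 1' above. */
;     return p->val != 0.0f;
;   }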