1; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py 2; RUN: llc -mtriple=aarch64-- -mcpu=falkor -O0 -aarch64-enable-atomic-cfg-tidy=0 -stop-after=irtranslator -global-isel -verify-machineinstrs %s -o - | FileCheck %s 3 4define i32 @load_invariant(i32* %ptr) { 5 ; CHECK-LABEL: name: load_invariant 6 ; CHECK: bb.1 (%ir-block.0): 7 ; CHECK: liveins: $x0 8 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 9 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (invariant load 4 from %ir.ptr) 10 ; CHECK: $w0 = COPY [[LOAD]](s32) 11 ; CHECK: RET_ReallyLR implicit $w0 12 %load = load i32, i32* %ptr, align 4, !invariant.load !0 13 ret i32 %load 14} 15 16define i32 @load_volatile_invariant(i32* %ptr) { 17 ; CHECK-LABEL: name: load_volatile_invariant 18 ; CHECK: bb.1 (%ir-block.0): 19 ; CHECK: liveins: $x0 20 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 21 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (volatile invariant load 4 from %ir.ptr) 22 ; CHECK: $w0 = COPY [[LOAD]](s32) 23 ; CHECK: RET_ReallyLR implicit $w0 24 %load = load volatile i32, i32* %ptr, align 4, !invariant.load !0 25 ret i32 %load 26} 27 28define i32 @load_dereferenceable(i32* dereferenceable(4) %ptr) { 29 ; CHECK-LABEL: name: load_dereferenceable 30 ; CHECK: bb.1 (%ir-block.0): 31 ; CHECK: liveins: $x0 32 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 33 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable load 4 from %ir.ptr) 34 ; CHECK: $w0 = COPY [[LOAD]](s32) 35 ; CHECK: RET_ReallyLR implicit $w0 36 %load = load i32, i32* %ptr, align 4 37 ret i32 %load 38} 39 40define i32 @load_dereferenceable_invariant(i32* dereferenceable(4) %ptr) { 41 ; CHECK-LABEL: name: load_dereferenceable_invariant 42 ; CHECK: bb.1 (%ir-block.0): 43 ; CHECK: liveins: $x0 44 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 45 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (dereferenceable invariant load 4 from %ir.ptr) 46 ; CHECK: $w0 = COPY [[LOAD]](s32) 47 ; CHECK: RET_ReallyLR implicit $w0 48 %load = load i32, i32* %ptr, align 4, !invariant.load !0 49 ret i32 %load 50} 51 52define i32 @load_nontemporal(i32* %ptr) { 53 ; CHECK-LABEL: name: load_nontemporal 54 ; CHECK: bb.1 (%ir-block.0): 55 ; CHECK: liveins: $x0 56 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 57 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: (non-temporal load 4 from %ir.ptr) 58 ; CHECK: $w0 = COPY [[LOAD]](s32) 59 ; CHECK: RET_ReallyLR implicit $w0 60 %load = load i32, i32* %ptr, align 4, !nontemporal !0 61 ret i32 %load 62} 63 64define i32 @load_falkor_strided_access(i32* %ptr) { 65 ; CHECK-LABEL: name: load_falkor_strided_access 66 ; CHECK: bb.1 (%ir-block.0): 67 ; CHECK: liveins: $x0 68 ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0 69 ; CHECK: [[LOAD:%[0-9]+]]:_(s32) = G_LOAD [[COPY]](p0) :: ("aarch64-strided-access" load 4 from %ir.ptr) 70 ; CHECK: $w0 = COPY [[LOAD]](s32) 71 ; CHECK: RET_ReallyLR implicit $w0 72 %load = load i32, i32* %ptr, align 4, !falkor.strided.access !0 73 ret i32 %load 74} 75 76!0 = !{} 77