/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/MemorySSA/

D | invariant-groups.ll
      3  ; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
      4  ; launder.invariant.group intrinsics entirely. We'll need to pay attention to
      5  ; them when/if we decide to support invariant groups.
     12  store i32 0, i32* %a, align 4, !invariant.group !0
     20  ; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
     21  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
     25  ; invariant.group.
     28  %2 = load i32, i32* %a32, align 4, !invariant.group !0
     35  store i32 0, i32* %a, align 4, !invariant.group !0
     39  ; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
         [all …]

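The shape being tested, stitched into one self-contained sketch (function and value names are hypothetical; the test's own CHECK lines are authoritative for what MemorySSA prints): a store tagged !invariant.group, a launder call, then a load through the laundered pointer.

    declare i8* @llvm.launder.invariant.group.p0i8(i8*)

    define i32 @sketch(i32* %a) {
      store i32 0, i32* %a, align 4, !invariant.group !0
      %a8 = bitcast i32* %a to i8*
      ; Per the test comment above, MemorySSA in this LLVM version ignores
      ; the launder intrinsic rather than modeling invariant groups.
      %laundered = call i8* @llvm.launder.invariant.group.p0i8(i8* %a8)
      %a32 = bitcast i8* %laundered to i32*
      %v = load i32, i32* %a32, align 4, !invariant.group !0
      ret i32 %v
    }

    !0 = !{}
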
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/

D | invariant.group.ll
      7  %b2 = call i8* @llvm.launder.invariant.group.p0i8(i8* null)
     13  ; CHECK-NEXT: call i8* @llvm.launder.invariant.group.p0i8(i8* null)
     14  %b2 = call i8* @llvm.launder.invariant.group.p0i8(i8* null)
     20  ; CHECK: %b2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* null)
     22  %b2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* null)
     29  %b2 = call i8* @llvm.launder.invariant.group.p0i8(i8* undef)
     36  %b2 = call i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)* undef)
     43  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
     49  ; CHECK-NEXT: call i8* @llvm.strip.invariant.group.p0i8(i8* null)
     50  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
         [all …]

D | invariant.ll
      1  ; Test to make sure unused llvm.invariant.start calls are not trivially eliminated
      7  declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
      8  declare {}* @llvm.invariant.start.p1i8(i64, i8 addrspace(1)* nocapture) nounwind readonly
     13  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a) ; <{}*> [#uses=0]
     14  ; CHECK: call {}* @llvm.invariant.start.p0i8
     20  ; make sure llvm.invariant.call in non-default addrspace are also not eliminated.
     23  %i = call {}* @llvm.invariant.start.p1i8(i64 1, i8 addrspace(1)* %a) ; <{}*> [#uses=0]
     24  ; CHECK: call {}* @llvm.invariant.start.p1i8

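A minimal reproduction of the property this test guards (names are hypothetical): the invariant.start result is unused, yet InstCombine must keep the call, because the intrinsic marks the memory region as invariant from that point on.

    declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly

    define void @sketch() {
      %a = alloca i8
      store i8 0, i8* %a
      ; Result unused, but the call has a lasting effect on how the byte at
      ; %a may be optimized, so it is not trivially dead.
      %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
      ret void
    }
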
/external/swiftshader/third_party/llvm-7.0/llvm/test/Other/

D | invariant.group.ll
      7  ; launder.invariant.group, that is prohibited if there is a memory clobber
     14  store i8 42, i8* %ptr, !invariant.group !0
     15  ; CHECK: call i8* @llvm.launder.invariant.group.p0i8
     16  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     18  ; CHECK: call i8* @llvm.launder.invariant.group
     19  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     27  ; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
     28  %v = load i8, i8* %ptr3, !invariant.group !0
     37  store i8 42, i8* %ptr, !invariant.group !0
     38  ; CHECK: call i8* @llvm.launder.invariant.group.p0i8
         [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GVN/

D | invariant.group.ll
     13  store i8 42, i8* %ptr, !invariant.group !0
     16  %a = load i8, i8* %ptr, !invariant.group !0
     17  %b = load i8, i8* %ptr, !invariant.group !0
     18  %c = load i8, i8* %ptr, !invariant.group !0
     27  store i8 42, i8* %ptr, !invariant.group !0
     28  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     29  %a = load i8, i8* %ptr, !invariant.group !0
     40  store i8 42, i8* %ptr, !invariant.group !0
     43  store i8 13, i8* %ptr ; can't use this store with invariant.group
     48  %b = load i8, i8* %ptr, !invariant.group !0
         [all …]

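What these fragments exercise, stitched into one runnable sketch (the @clobber callee is a hypothetical stand-in for an opaque call):

    declare void @clobber(i8*)

    define i8 @sketch(i8* %ptr) {
      store i8 42, i8* %ptr, !invariant.group !0
      call void @clobber(i8* %ptr)
      ; GVN may still forward 42 here despite the opaque call: both accesses
      ; carry the same !invariant.group on the same pointer, which promises
      ; the value at %ptr is unchanged between them.
      %a = load i8, i8* %ptr, !invariant.group !0
      ret i8 %a
    }

    !0 = !{}
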
D | invariant.start.ll
      1  ; Test to make sure llvm.invariant.start calls are not treated as clobbers.
      5  declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
      6  declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
      8  ; We forward store to the load across the invariant.start intrinsic
     11  ; CHECK: call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     16  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     24  ; i.e. invariant.start is in another basic block.
     27  ; CHECK: call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     32  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     44  ; We should not value forward %foo to the invariant.end corresponding to %bar.
         [all …]

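The first case in one piece, as a hedged sketch with hypothetical names: invariant.start is declared readonly, so it is not a clobber and GVN can forward the stored value across it.

    declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly

    define i8 @sketch(i8* %a) {
      store i8 5, i8* %a
      ; readonly: does not overwrite the byte at %a, so this is no clobber.
      %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
      ; GVN can replace this load with the constant 5.
      %r = load i8, i8* %a
      ret i8 %r
    }
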
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/NewGVN/

D | invariant.group.ll
     14  store i8 42, i8* %ptr, !invariant.group !0
     17  %a = load i8, i8* %ptr, !invariant.group !0
     18  %b = load i8, i8* %ptr, !invariant.group !0
     19  %c = load i8, i8* %ptr, !invariant.group !0
     28  store i8 42, i8* %ptr, !invariant.group !0
     29  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     30  %a = load i8, i8* %ptr, !invariant.group !0
     41  store i8 42, i8* %ptr, !invariant.group !0
     44  store i8 13, i8* %ptr ; can't use this store with invariant.group
     49  %b = load i8, i8* %ptr, !invariant.group !0
         [all …]

D | invariant.start.ll
      1  ; Test to make sure llvm.invariant.start calls are not treated as clobbers.
      5  declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
      6  declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
      8  ; We forward store to the load across the invariant.start intrinsic
     11  ; CHECK: call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     16  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     24  ; i.e. invariant.start is in another basic block.
     27  ; CHECK: call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     32  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %a)
     44  ; We should not value forward %foo to the invariant.end corresponding to %bar.
         [all …]

/external/llvm/test/Transforms/GVN/

D | invariant.group.ll
     13  store i8 42, i8* %ptr, !invariant.group !0
     16  %a = load i8, i8* %ptr, !invariant.group !0
     17  %b = load i8, i8* %ptr, !invariant.group !0
     18  %c = load i8, i8* %ptr, !invariant.group !0
     27  store i8 42, i8* %ptr, !invariant.group !0
     28  %ptr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
     29  %a = load i8, i8* %ptr, !invariant.group !0
     40  store i8 42, i8* %ptr, !invariant.group !0
     43  store i8 13, i8* %ptr ; can't use this store with invariant.group
     48  %b = load i8, i8* %ptr, !invariant.group !0
         [all …]

D | invariant-load.ll
      1  ; Test if the !invariant.load metadata is maintained by GVN.
      6  ; CHECK: %x = load i32, i32* %p, align 4, !invariant.load !0
      9  %x = load i32, i32* %p, align 4, !invariant.load !0
     12  %y = load i32, i32* %p, align 4, !invariant.load !0
     19  ; CHECK-NOT: !invariant.load
     25  %y = load i32, i32* %p, align 4, !invariant.load !0
     30  ; With the invariant.load metadata, what would otherwise
     44  %v2 = load i32, i32* %p, !invariant.load !0
     55  %v1 = load i32, i32* %p, !invariant.load !0
     77  %v1 = load i32, i32* %p, !invariant.load !0
         [all …]

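For context, a sketch of the redundancy !invariant.load exposes (the @clobber callee is hypothetical): since both loads assert the location holds the same value whenever it is dereferenceable, GVN may delete the second load even across an opaque call, and it should keep the metadata on the surviving load.

    declare void @clobber()

    define i32 @sketch(i32* %p) {
      %x = load i32, i32* %p, align 4, !invariant.load !0
      call void @clobber()
      ; Redundant: may be replaced by %x despite the intervening call.
      %y = load i32, i32* %p, align 4, !invariant.load !0
      %sum = add i32 %x, %y
      ret i32 %sum
    }

    !0 = !{}
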
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/

D | invariant.group.ll
      8  ; CHECK-NOT: !invariant.group
      9  ; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
     11  %val = load i8, i8* @tmp, !invariant.group !0
     12  %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
     15  store i8 42, i8* %ptr, !invariant.group !0
     24  ; CHECK-NOT: !invariant.group
     25  ; CHECK-NOT: @llvm.strip.invariant.group.p0i8(
     27  %val = load i8, i8* @tmp, !invariant.group !0
     28  %ptr = call i8* @llvm.strip.invariant.group.p0i8(i8* @tmp)
     31  store i8 42, i8* %ptr, !invariant.group !0
         [all …]

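Reading the CHECK-NOT lines: by the time CodeGenPrepare runs, the invariant.group machinery has served its purpose, so the pass erases the metadata and the launder/strip calls. A hedged sketch of the input shape (names hypothetical):

    @tmp = global i8 0

    declare i8* @llvm.launder.invariant.group.p0i8(i8*)

    define void @sketch() {
      ; Input to CodeGenPrepare:
      %val = load i8, i8* @tmp, !invariant.group !0
      %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
      store i8 42, i8* %ptr, !invariant.group !0
      ; Expected output, per the CHECK-NOT lines above: the load and store
      ; remain, with the metadata dropped and %ptr rewritten to @tmp itself.
      ret void
    }

    !0 = !{}
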
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/DeadStoreElimination/

D | launder.invariant.group.ll
      7  ; CHECK: %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
      8  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     18  ; CHECK: %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     19  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     22  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr2)
     23  %ptr4 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr3)
     34  ; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
     35  %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
     38  %ptr3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr2)
     39  %ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
         [all …]

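A sketch of the pattern the chained calls suggest, under the assumption (not verifiable from the fragment alone) that DSE looks through launder/strip to the underlying pointer: the first store below would then be dead, because the store through the laundered pointer overwrites the same byte.

    declare i8* @llvm.launder.invariant.group.p0i8(i8*)

    define void @sketch(i8* %ptr) {
      ; Dead if DSE can see that %ptr2 aliases %ptr exactly.
      store i8 42, i8* %ptr
      %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
      store i8 43, i8* %ptr2
      ret void
    }
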
/external/swiftshader/third_party/llvm-7.0/llvm/test/Bitcode/

D | upgrade-invariant-group-barrier.ll
      4  ; it was renamed to launder.invariant.group
      7  ; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
      8  ; CHECK: %p3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
      9  ; CHECK: %p4 = call i16* @llvm.launder.invariant.group.p0i16(i16* %p16)
     10  %p2 = call i8* @llvm.invariant.group.barrier(i8* %p1)
     11  %p3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p1)
     12  %p4 = call i16* @llvm.invariant.group.barrier.p0i16(i16* %p16)
     17  ; CHECK: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
     19  ; CHECK: declare i16* @llvm.launder.invariant.group.p0i16(i16*)
     20  declare i8* @llvm.invariant.group.barrier(i8*)
         [all …]

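The rename this upgrade test covers, side by side (a sketch; the .p0i8/.p0i16 suffixes follow the usual overloaded-intrinsic mangling visible in the CHECK lines):

    ; LLVM 6 and earlier:
    declare i8* @llvm.invariant.group.barrier(i8*)
    %p2 = call i8* @llvm.invariant.group.barrier(i8* %p1)

    ; After LLVM 7's bitcode auto-upgrade:
    declare i8* @llvm.launder.invariant.group.p0i8(i8*)
    %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
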
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/EarlyCSE/

D | invariant.start.ll
      4  declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly
      5  declare void @llvm.invariant.end.p0i8({}*, i64, i8* nocapture) nounwind
      7  ; Check that we do load-load forwarding over invariant.start, since it does not
     12  ; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
     16  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
     23  ; Trivial Store->load forwarding over invariant.start
     27  ; CHECK-NEXT: %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
     31  %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
     36  ; We can DSE over invariant.start calls, since the first store to
     38  ; of invariant.start.
         [all …]

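The load-load case as one runnable sketch (hypothetical names): because invariant.start only reads memory, EarlyCSE can CSE the second load, after which the subtraction folds to zero.

    declare {}* @llvm.invariant.start.p0i8(i64, i8* nocapture) nounwind readonly

    define i8 @sketch(i8* %P) {
      %V1 = load i8, i8* %P
      ; Not a clobber: the call only reads memory.
      %i = call {}* @llvm.invariant.start.p0i8(i64 1, i8* %P)
      %V2 = load i8, i8* %P
      ; With %V2 replaced by %V1, this becomes ret i8 0.
      %Diff = sub i8 %V1, %V2
      ret i8 %Diff
    }
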
D | invariant-loads.ll
      8  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
     14  %val0 = load i32, i32* %ptr, !invariant.load !{}
     16  %val1 = load i32, i32* %ptr, !invariant.load !{}
     18  %val2 = load i32, i32* %ptr, !invariant.load !{}
     24  ; We can forward invariant loads to non-invariant loads.
     27  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
     31  %val0 = load i32, i32* %ptr, !invariant.load !{}
     39  ; We can forward a non-invariant load into an invariant load.
     48  %val1 = load i32, i32* %ptr, !invariant.load !{}
     55  %val0 = load i32, i32* %ptr, !invariant.load !{}
         [all …]

/external/llvm/test/Transforms/EarlyCSE/

D | invariant-loads.ll
      7  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
     13  %val0 = load i32, i32* %ptr, !invariant.load !{}
     15  %val1 = load i32, i32* %ptr, !invariant.load !{}
     17  %val2 = load i32, i32* %ptr, !invariant.load !{}
     23  ; We can forward invariant loads to non-invariant loads, since once an
     24  ; invariant load has executed, the location loaded from is known to be
     28  ; CHECK: %val0 = load i32, i32* %ptr, !invariant.load !0
     32  %val0 = load i32, i32* %ptr, !invariant.load !{}
     40  ; Negative test -- we can't forward a non-invariant load into an
     41  ; invariant load.
         [all …]

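Note that the two copies of this test disagree about one direction: this older external/llvm copy treats forwarding a non-invariant load into an invariant load as a negative test, while the llvm-7.0 copy above says it can be done. The direction both copies agree on, as a sketch (hypothetical callee):

    declare void @clobber()

    define i32 @sketch(i32* %ptr) {
      ; Once this invariant load executes, the location is known unchanging,
      ; so the plain load below may reuse %val0 even across the call.
      %val0 = load i32, i32* %ptr, !invariant.load !{}
      call void @clobber()
      %val1 = load i32, i32* %ptr
      %sum = add i32 %val0, %val1
      ret i32 %sum
    }
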
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GVN/PRE/

D | invariant-load.ll
      1  ; Test if the !invariant.load metadata is maintained by GVN.
      6  ; CHECK: %x = load i32, i32* %p, align 4, !invariant.load !0
      9  %x = load i32, i32* %p, align 4, !invariant.load !0
     12  %y = load i32, i32* %p, align 4, !invariant.load !0
     19  ; CHECK-NOT: !invariant.load
     25  %y = load i32, i32* %p, align 4, !invariant.load !0
     30  ; With the invariant.load metadata, what would otherwise
     44  %v2 = load i32, i32* %p, !invariant.load !0
     55  %v1 = load i32, i32* %p, !invariant.load !0
     77  %v1 = load i32, i32* %p, !invariant.load !0
         [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/

D | memory-legalizer-region.mir
     16  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     17  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
     40  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     41  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
     64  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     65  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
     88  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     89  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
    112  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
    113  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
         [all …]

D | memory-legalizer-local.mir
     16  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     17  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
     40  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     41  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
     64  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     65  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
     88  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
     89  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
    112  …$sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 36, 0 :: (dereferenceable invariant load 4 from `i32 addrs…
    113  …$sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 44, 0 :: (dereferenceable invariant load 8 …
         [all …]

/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/ValueTracking/

D | invariant.group.ll
      5  ; CHECK: %[[p:.*]] = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0)
     10  %p = call i8* @llvm.launder.invariant.group.p0i8(i8* %0)
     11  %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
     19  ; CHECK: %[[p:.*]] = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %0)
     24  %p = call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
     25  %p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
     31  declare i8* @llvm.launder.invariant.group.p0i8(i8*)
     32  declare i8* @llvm.strip.invariant.group.p0i8(i8*)

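What the CHECK lines show: value tracking knows launder/strip return a pointer with the same nullness as their argument, so once %0 is known nonnull the call argument gets re-annotated as nonnull. A sketch of one way a pointer becomes known nonnull (the branch shape here is hypothetical, not the test's):

    declare i8* @llvm.launder.invariant.group.p0i8(i8*)

    define i8* @sketch(i8* %0) {
      %null_check = icmp eq i8* %0, null
      br i1 %null_check, label %is_null, label %not_null

    not_null:
      ; Here %0 is provably nonnull, and value tracking can push that fact
      ; onto the launder call's argument and result.
      %p = call i8* @llvm.launder.invariant.group.p0i8(i8* %0)
      ret i8* %p

    is_null:
      ret i8* null
    }
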
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/GlobalISel/

D | select-memop-scalar-x32.mir
     60  …; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load …
     65  %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
     85  …; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load …
     90  %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
    110  …; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load …
    115  %0(p0) = G_LOAD %1(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
    137  …; ALL: [[MOV8rm:%[0-9]+]]:gr8 = MOV8rm %fixed-stack.0, 1, $noreg, 0, $noreg :: (invariant load 1 f…
    138  …; ALL: [[MOV32rm:%[0-9]+]]:gr32 = MOV32rm %fixed-stack.1, 1, $noreg, 0, $noreg :: (invariant load …
    143  %0(s8) = G_LOAD %2(p0) :: (invariant load 1 from %fixed-stack.1, align 0)
    145  %1(p0) = G_LOAD %3(p0) :: (invariant load 4 from %fixed-stack.0, align 0)
         [all …]

/external/llvm/test/Transforms/GlobalOpt/

D | invariant.group.barrier.ll
     36  %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
     44  ; We can't step through invariant.group.barrier here, because that would change
     46  ; val = load i32, i32* %ptrVal, !invariant.group !0
     48  ; %val = load i32, i32* @tmp3, !invariant.group !0
     54  store i32 13, i32* @tmp3, !invariant.group !0
     57  %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
     61  store i32 42, i32* %1, !invariant.group !0
     68  %val = load i32, i32* %ptrVal, !invariant.group !0
     71  %val2 = load i32, i32* @tmp3, !invariant.group !0
     77  declare i8* @llvm.invariant.group.barrier(i8*)

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/GlobalOpt/

D | invariant.group.ll
     36  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
     44  ; We can't step through launder.invariant.group here, because that would change
     46  ; val = load i32, i32* %ptrVal, !invariant.group !0
     48  ; %val = load i32, i32* @tmp3, !invariant.group !0
     54  store i32 13, i32* @tmp3, !invariant.group !0
     57  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
     61  store i32 42, i32* %1, !invariant.group !0
     68  %val = load i32, i32* %ptrVal, !invariant.group !0
     71  %val2 = load i32, i32* @tmp3, !invariant.group !0
     77  declare i8* @llvm.launder.invariant.group(i8*)

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/RewriteStatepointsForGC/

D | drop-invalid-metadata.ll
     11  ; Confirm that loadedval instruction does not contain invariant.load metadata.
     13  ; Since loadedval is not marked invariant, it will prevent incorrectly sinking
     23  ; LICM may sink this load to exit block after RS4GC because it's tagged invariant.
     24  %loadedval = load i32, i32 addrspace(1)* %baseaddr, align 8, !range !0, !invariant.load !1
     79  ; invariant.start allows us to sink the load past the baz statepoint call into taken block, which is
     80  ; incorrect. remove the invariant.start and RAUW undef.
     83  ; CHECK-NOT: invariant.start
     86  %invst = call {}* @llvm.invariant.start.p1i32(i64 1, i32 addrspace(1)* %v1)
     93  call void @llvm.invariant.end.p1i32({}* %invst, i64 4, i32 addrspace(1)* %v1)
     99  %foo = call i32 @escaping.invariant.start({}* %invst)
         [all …]

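Why the pass must drop this metadata, as a sketch (the @do_safepoint callee and names are hypothetical): after safepoints are rewritten, a moving collector may relocate the object behind %baseaddr at any statepoint, so an "unchanging location" claim that held in the original IR no longer does.

    declare void @do_safepoint()

    define i32 @sketch(i32 addrspace(1)* %baseaddr) gc "statepoint-example" {
      ; Tagged invariant in the input; RS4GC strips the tag, otherwise LICM
      ; could later sink this load past the safepoint below.
      %loadedval = load i32, i32 addrspace(1)* %baseaddr, align 8, !invariant.load !0
      call void @do_safepoint()
      ret i32 %loadedval
    }

    !0 = !{}
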
/external/llvm/test/Transforms/CodeGenPrepare/

D | invariant.group.ll
      8  ; CHECK-NOT: !invariant.group
      9  ; CHECK-NOT: @llvm.invariant.group.barrier(
     11  %val = load i8, i8* @tmp, !invariant.group !0
     12  %ptr = call i8* @llvm.invariant.group.barrier(i8* @tmp)
     15  store i8 42, i8* %ptr, !invariant.group !0
     21  declare i8* @llvm.invariant.group.barrier(i8*)