/external/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/ |
D | merge-stores.ll |
    14  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    16  store i8 123, i8 addrspace(1)* %out.gep.1
    24  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    26  store i8 123, i8 addrspace(1)* %out.gep.1
    34  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    36  store i16 123, i16 addrspace(1)* %out.gep.1
    44  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    46  store i16 0, i16 addrspace(1)* %out.gep.1
    54  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    56  store i16 123, i16 addrspace(1)* %out.gep.1
    [all …]
|
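The LoadStoreVectorizer tests above store to adjacent addresses reached through small getelementptr offsets. Below is a minimal standalone sketch of that pattern (the function name and constants are illustrative, not taken from the test file); run under something like `opt -mtriple=amdgcn-- -load-store-vectorizer -S`, the two byte-wide stores are expected to be merged into a single wider store (e.g. a `<2 x i8>` store).

    ; Two stores to consecutive i8 locations in global memory (addrspace 1).
    define void @merge_two_adjacent_i8_stores(i8 addrspace(1)* %out) {
      %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
      store i8 45, i8 addrspace(1)* %out
      store i8 123, i8 addrspace(1)* %out.gep.1
      ret void
    }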
D | weird-type-accesses.ll |
    20  %out.gep.1 = getelementptr i1, i1 addrspace(1)* %out, i32 1
    21  store i1 true, i1 addrspace(1)* %out.gep.1
    30  %out.gep.1 = getelementptr i2, i2 addrspace(1)* %out, i32 1
    31  store i2 1, i2 addrspace(1)* %out.gep.1
    41  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    43  store i8 123, i8 addrspace(1)* %out.gep.1
    52  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out.i8, i32 1
    53  store i8 123, i8 addrspace(1)* %out.gep.1
    62  %out.gep.1 = getelementptr %struct.foo, %struct.foo addrspace(1)* %out, i32 1
    63  store %struct.foo { i32 12, i8 3 }, %struct.foo addrspace(1)* %out.gep.1
    [all …]
|
D | merge-stores-private.ll |
    18  %out.gep.1 = getelementptr i32, i32* %out, i32 1
    19  %out.gep.2 = getelementptr i32, i32* %out, i32 2
    20  %out.gep.3 = getelementptr i32, i32* %out, i32 3
    23  store i32 1, i32* %out.gep.1
    24  store i32 23, i32* %out.gep.2
    25  store i32 19, i32* %out.gep.3
    32  %out.gep.1 = getelementptr i8, i8* %out, i32 1
    33  %out.gep.2 = getelementptr i8, i8* %out, i32 2
    34  %out.gep.3 = getelementptr i8, i8* %out, i32 3
    37  store i8 1, i8* %out.gep.1
    [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | merge-stores.ll |
    19  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    21  store i8 123, i8 addrspace(1)* %out.gep.1
    31  %out.gep.1 = getelementptr i8, i8 addrspace(1)* %out, i32 1
    33  store i8 123, i8 addrspace(1)* %out.gep.1
    41  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    43  store i16 123, i16 addrspace(1)* %out.gep.1
    51  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    53  store i16 0, i16 addrspace(1)* %out.gep.1
    63  %out.gep.1 = getelementptr i16, i16 addrspace(1)* %out, i32 1
    65  store i16 123, i16 addrspace(1)* %out.gep.1
    [all …]
|
D | mad-combine.ll |
    36  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    37  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    38  %gep.2 = getelementptr float, float addrspace(1)* %gep.0, i32 2
    39  %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
    41  %a = load volatile float, float addrspace(1)* %gep.0
    42  %b = load volatile float, float addrspace(1)* %gep.1
    43  %c = load volatile float, float addrspace(1)* %gep.2
    47  store float %fma, float addrspace(1)* %gep.out
    75  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    76  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    [all …]
|
D | commute-compares.ll |
    13  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
    14  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
    15  %val = load i32, i32 addrspace(1)* %gep.in
    18  store i32 %ext, i32 addrspace(1)* %gep.out
    26  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
    27  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
    28  %val = load i32, i32 addrspace(1)* %gep.in
    31  store i32 %ext, i32 addrspace(1)* %gep.out
    41  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
    42  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
    [all …]
|
D | fma-combine.ll |
    18  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    19  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    20  %gep.2 = getelementptr double, double addrspace(1)* %gep.0, i32 2
    21  %gep.out = getelementptr double, double addrspace(1)* %out, i32 %tid
    23  %a = load volatile double, double addrspace(1)* %gep.0
    24  %b = load volatile double, double addrspace(1)* %gep.1
    25  %c = load volatile double, double addrspace(1)* %gep.2
    29  store double %fma, double addrspace(1)* %gep.out
    46  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    47  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    [all …]
|
D | fmuladd.ll |
    41  %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
    42  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    43  %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
    45  %r1 = load volatile float, float addrspace(1)* %gep.0
    46  %r2 = load volatile float, float addrspace(1)* %gep.1
    49  store float %r3, float addrspace(1)* %gep.out
    60  %gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
    61  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    62  %gep.out = getelementptr float, float addrspace(1)* %out, i32 %tid
    64  %r1 = load volatile float, float addrspace(1)* %gep.0
    [all …]
|
D | madmk.ll |
    17  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    18  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    19  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
    21  %a = load volatile float, float addrspace(1)* %gep.0, align 4
    22  %b = load volatile float, float addrspace(1)* %gep.1, align 4
    26  store float %madmk, float addrspace(1)* %out.gep, align 4
    41  %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    42  %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
    43  %in.gep.2 = getelementptr float, float addrspace(1)* %in.gep.0, i32 2
    45  %out.gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
    [all …]
|
D | shift-and-i64-ubfe.ll |
    13  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
    14  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
    15  %ld.64 = load i64, i64 addrspace(1)* %in.gep
    18  store i64 %bit, i64 addrspace(1)* %out.gep
    30  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
    31  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
    32  %ld.64 = load i64, i64 addrspace(1)* %in.gep
    35  store i64 %bit, i64 addrspace(1)* %out.gep
    46  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
    47  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
    [all …]
|
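The excerpt from shift-and-i64-ubfe.ll only shows the gep/load/store lines; the shift-and-mask body between them is truncated. A minimal sketch of that shape is below (the function name, shift amount, and mask are illustrative assumptions, not copied from the file); these tests appear to check that such a lshr+and of an i64 is selected as a 32-bit bitfield extract (v_bfe_u32) on one dword rather than a full 64-bit shift and mask.

    declare i32 @llvm.amdgcn.workitem.id.x()

    ; Hypothetical single-bit extract from bit 1 of a loaded i64.
    define void @extract_bit_1_from_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
      %id.x = call i32 @llvm.amdgcn.workitem.id.x()
      %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %id.x
      %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %id.x
      %ld.64 = load i64, i64 addrspace(1)* %in.gep
      %srl = lshr i64 %ld.64, 1
      %bit = and i64 %srl, 1
      store i64 %bit, i64 addrspace(1)* %out.gep
      ret void
    }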
D | fmax_legacy.f64.ll |
    9   %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    10  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    12  %a = load double, double addrspace(1)* %gep.0, align 8
    13  %b = load double, double addrspace(1)* %gep.1, align 8
    24  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    25  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    27  %a = load double, double addrspace(1)* %gep.0, align 8
    28  %b = load double, double addrspace(1)* %gep.1, align 8
    39  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    40  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    [all …]
|
D | madak.ll |
    15  %in.a.gep = getelementptr float, float addrspace(1)* %in.a, i32 %tid
    16  %in.b.gep = getelementptr float, float addrspace(1)* %in.b, i32 %tid
    17  %out.gep = getelementptr float, float addrspace(1)* %out, i32 %tid
    19  %a = load float, float addrspace(1)* %in.a.gep, align 4
    20  %b = load float, float addrspace(1)* %in.b.gep, align 4
    24  store float %madak, float addrspace(1)* %out.gep, align 4
    43  %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    44  %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
    45  %in.gep.2 = getelementptr float, float addrspace(1)* %in.gep.0, i32 2
    47  %out.gep.0 = getelementptr float, float addrspace(1)* %out, i32 %tid
    [all …]
|
D | fmax_legacy.ll |
    18  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    19  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    21  %a = load volatile float, float addrspace(1)* %gep.0, align 4
    22  %b = load volatile float, float addrspace(1)* %gep.1, align 4
    38  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    39  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    41  %a = load volatile float, float addrspace(1)* %gep.0, align 4
    42  %b = load volatile float, float addrspace(1)* %gep.1, align 4
    58  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    59  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    [all …]
|
D | fmin_legacy.f64.ll |
    19  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    20  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    22  %a = load double, double addrspace(1)* %gep.0, align 8
    23  %b = load double, double addrspace(1)* %gep.1, align 8
    34  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    35  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    37  %a = load double, double addrspace(1)* %gep.0, align 8
    38  %b = load double, double addrspace(1)* %gep.1, align 8
    49  %gep.0 = getelementptr double, double addrspace(1)* %in, i32 %tid
    50  %gep.1 = getelementptr double, double addrspace(1)* %gep.0, i32 1
    [all …]
|
D | commute_modifiers.ll |
    13  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    14  %x = load float, float addrspace(1)* %gep.0
    27  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    28  %x = load float, float addrspace(1)* %gep.0
    42  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    43  %x = load float, float addrspace(1)* %gep.0
    58  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    59  %x = load float, float addrspace(1)* %gep.0
    73  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    74  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    [all …]
|
D | cgp-addressing-modes.ll |
    18  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 999999
    19  %in.gep = getelementptr i32, i32 addrspace(1)* %in, i64 7
    25  %tmp1 = load i32, i32 addrspace(1)* %in.gep
    30  store i32 %x, i32 addrspace(1)* %out.gep
    38  ; OPT: %in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 65535
    48  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i64 99999
    49  %in.gep = getelementptr i8, i8 addrspace(1)* %in, i64 65535
    55  %tmp1 = load i8, i8 addrspace(1)* %in.gep
    61  store i32 %x, i32 addrspace(1)* %out.gep
    75  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 1024
    [all …]
|
D | fmin_legacy.ll |
    51  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    52  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    54  %a = load volatile float, float addrspace(1)* %gep.0, align 4
    55  %b = load volatile float, float addrspace(1)* %gep.1, align 4
    70  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    71  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    73  %a = load volatile float, float addrspace(1)* %gep.0, align 4
    74  %b = load volatile float, float addrspace(1)* %gep.1, align 4
    89  %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid
    90  %gep.1 = getelementptr float, float addrspace(1)* %gep.0, i32 1
    [all …]
|
D | target-cpu.ll |
    19  %kernargs.gep = getelementptr inbounds i8, i8 addrspace(2)* %kernargs, i64 1024
    20  %kernargs.gep.cast = bitcast i8 addrspace(2)* %kernargs.gep to i32 addrspace(1)* addrspace(2)*
    21  %ptr = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %kernargs.gep.cast
    24  %gep = getelementptr inbounds i32, i32 addrspace(1)* %ptr, i64 %id.ext
    25  store i32 0, i32 addrspace(1)* %gep
    35  %kernargs.gep = getelementptr inbounds i8, i8 addrspace(2)* %kernargs, i64 1024
    36  %kernargs.gep.cast = bitcast i8 addrspace(2)* %kernargs.gep to i32 addrspace(1)* addrspace(2)*
    37  %ptr = load i32 addrspace(1)*, i32 addrspace(1)* addrspace(2)* %kernargs.gep.cast
    40  %gep = getelementptr inbounds i32, i32 addrspace(1)* %ptr, i64 %id.ext
    41  store i32 0, i32 addrspace(1)* %gep
    [all …]
|
D | llvm.amdgcn.atomic.dec.ll |
    27  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    28  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
    47  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    48  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
    65  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
    66  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
    82  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
    83  %result = call i32 @llvm.amdgcn.atomic.dec.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
    93  %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
    94  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id
    [all …]
|
D | llvm.amdgcn.atomic.inc.ll |
    27  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    28  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
    47  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    48  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
    65  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
    66  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
    82  %gep = getelementptr i32, i32 addrspace(1)* %ptr, i32 4
    83  %result = call i32 @llvm.amdgcn.atomic.inc.i32.p1i32(i32 addrspace(1)* %gep, i32 42)
    93  %gep.tid = getelementptr i32, i32 addrspace(1)* %ptr, i32 %id
    94  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %id
    [all …]
|
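For reference, a self-contained sketch using the same two-operand call form of llvm.amdgcn.atomic.inc that appears in the excerpt above; the declaration is inferred from that call, and the kernel name is illustrative. The intrinsic performs a wrapping unsigned increment of the pointed-to value (per the GCN DS/buffer "inc" operation) and returns the old value; this is the form used by this tree's in-repo LLVM, not necessarily current upstream.

    declare i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)*, i32)

    ; Wrapping unsigned increment of an LDS word at a 16-byte offset;
    ; the returned (pre-increment) value is written to global memory.
    define void @lds_atomic_inc_ret(i32 addrspace(1)* %out, i32 addrspace(3)* %ptr) {
      %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
      %result = call i32 @llvm.amdgcn.atomic.inc.i32.p3i32(i32 addrspace(3)* %gep, i32 42)
      store i32 %result, i32 addrspace(1)* %out
      ret void
    }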
D | no-shrink-extloads.ll |
    26  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
    27  %gep.out = getelementptr i16, i16 addrspace(1)* %out, i32 %tid
    28  %load = load i32, i32 addrspace(1)* %gep.in
    30  store i16 %trunc, i16 addrspace(1)* %gep.out
    48  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
    49  %gep.out = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
    50  %load = load i32, i32 addrspace(1)* %gep.in
    52  store i8 %trunc, i8 addrspace(1)* %gep.out
    70  %gep.in = getelementptr i32, i32 addrspace(1)* %in, i32 %tid
    71  %gep.out = getelementptr i1, i1 addrspace(1)* %out, i32 %tid
    [all …]
|
D | local-atomics.ll |
    25   %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    26   %result = atomicrmw xchg i32 addrspace(3)* %gep, i32 4 seq_cst
    51   %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    52   %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
    65   %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
    66   %result = atomicrmw add i32 addrspace(3)* %gep, i32 4 seq_cst
    88   %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 4
    89   %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
    102  %gep = getelementptr i32, i32 addrspace(3)* %ptr, i32 %add
    103  %result = atomicrmw add i32 addrspace(3)* %gep, i32 1 seq_cst
    [all …]
|
D | ds_write2.ll |
    14  %in.gep = getelementptr float, float addrspace(1)* %in, i32 %x.i
    15  %val = load float, float addrspace(1)* %in.gep, align 4
    32  %in.gep.0 = getelementptr float, float addrspace(1)* %in, i32 %x.i
    33  %in.gep.1 = getelementptr float, float addrspace(1)* %in.gep.0, i32 1
    34  %val0 = load volatile float, float addrspace(1)* %in.gep.0, align 4
    35  %val1 = load volatile float, float addrspace(1)* %in.gep.1, align 4
    51  %in0.gep = getelementptr float, float addrspace(1)* %in0, i32 %x.i
    52  %in1.gep = getelementptr float, float addrspace(1)* %in1, i32 %x.i
    53  %val0 = load volatile float, float addrspace(1)* %in0.gep, align 4
    54  %val1 = load volatile float, float addrspace(1)* %in1.gep, align 4
    [all …]
|
D | shift-and-i128-ubfe.ll |
    16  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    17  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    18  %ld.64 = load i128, i128 addrspace(1)* %in.gep
    21  store i128 %bit, i128 addrspace(1)* %out.gep
    38  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    39  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    40  %ld.64 = load i128, i128 addrspace(1)* %in.gep
    43  store i128 %bit, i128 addrspace(1)* %out.gep
    60  %in.gep = getelementptr i128, i128 addrspace(1)* %in, i32 %id.x
    61  %out.gep = getelementptr i128, i128 addrspace(1)* %out, i32 %id.x
    [all …]
|
/external/llvm/test/Transforms/InstSimplify/ |
D | gep.ll |
    12  %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv
    13  ret %struct.A* %gep
    22  %gep = getelementptr inbounds i8, i8* %b, i64 %sub
    23  ret i8* %gep
    33  %gep = getelementptr inbounds i64, i64* %b, i64 %ashr
    34  ret i64* %gep
    43  %gep = getelementptr inbounds %struct.A, %struct.A* %b, i64 %sdiv
    44  ret %struct.A* %gep
    52  %gep = getelementptr inbounds i8, i8* %b, i64 %sub
    53  ret i8* %gep
    [all …]
|
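The InstSimplify cases above return a gep of %b indexed by a value derived from a pointer difference (%sub, %sdiv, or %ashr of ptrtoint results). A minimal sketch of that fold is below, using i8 elements so no scaling division is needed; the function and value names are illustrative, not taken from gep.ll. Run through `opt -instsimplify -S`, the final return is expected to fold to `ret i8* %e`.

    define i8* @fold_gep_of_pointer_difference(i8* %b, i8* %e) {
      %e.int = ptrtoint i8* %e to i64
      %b.int = ptrtoint i8* %b to i64
      %sub = sub i64 %e.int, %b.int
      ; gep %b by the byte distance (%e - %b) simplifies back to %e
      %gep = getelementptr inbounds i8, i8* %b, i64 %sub
      ret i8* %gep
    }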