/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/ |
D | propagate_ir_flags.ll |
    18: %load1 = load i32, i32* %idx1, align 4
    23: %op1 = lshr exact i32 %load1, 1
    44: %load1 = load i32, i32* %idx1, align 4
    49: %op1 = lshr exact i32 %load1, 1
    70: %load1 = load i32, i32* %idx1, align 4
    75: %op1 = add nsw i32 %load1, 1
    96: %load1 = load i32, i32* %idx1, align 4
    101: %op1 = add nsw i32 %load1, 1
    122: %load1 = load i32, i32* %idx1, align 4
    127: %op1 = add nuw i32 %load1, 1
    [all …]
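Note: the flags in these matches (exact, nsw, nuw) are what the test exercises: when the SLP vectorizer packs scalar ops into one vector op, a flag may survive only if every scalar lane carried it. A minimal sketch of that rule, with hypothetical function and value names (not the test's actual bodies or CHECK lines):

    define void @flags_sketch(i32* %idx1, i32* %idx2) {
      %load1 = load i32, i32* %idx1, align 4
      %load2 = load i32, i32* %idx2, align 4
      %op1 = lshr exact i32 %load1, 1
      %op2 = lshr i32 %load2, 1          ; no 'exact' on this lane
      store i32 %op1, i32* %idx1, align 4
      store i32 %op2, i32* %idx2, align 4
      ret void
    }
    ; If %op1/%op2 are vectorized into one 'lshr <2 x i32>', the vector op
    ; must drop 'exact', because only one of the two lanes had it.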
|
D | pr27163.ll |
    11: %load1 = load i64, i64* %gep1, align 8
    13: store i64 %load1, i64* %gep1, align 8
    27: %phi = phi i64 [ %load1, %catch.dispatch ], [ 9, %invoke.cont1 ]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans32_dspr2.c |
    36: int load1, load2, load3, load4; in idct32_rows_dspr2() local
    147: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    207: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    267: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    323: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    379: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    435: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    661: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    720: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
    873: : [temp0] "=&r"(temp0), [load1] "=&r"(load1), [temp1] "=&r"(temp1), in idct32_rows_dspr2()
    [all …]
|
D | itrans32_cols_dspr2.c |
    32: int load1, load2, load3, load4; in vpx_idct32_cols_add_blk_dspr2() local
    103: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    163: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    223: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    279: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    335: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    391: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    617: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
    676: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
|
D | convolve2_vert_dspr2.c |
    30: uint32_t load1, load2; in convolve_bi_vert_4_dspr2() local
    98: : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_4_dspr2()
    120: uint32_t load1, load2; in convolve_bi_vert_64_dspr2() local
    188: : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_64_dspr2()
|
D | convolve2_avg_dspr2.c |
    30: uint32_t load1, load2; in convolve_bi_avg_vert_4_dspr2() local
    105: : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_avg_vert_4_dspr2()
    129: uint32_t load1, load2; in convolve_bi_avg_vert_64_dspr2() local
    205: : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_avg_vert_64_dspr2()
|
D | convolve8_vert_dspr2.c |
    30: uint32_t load1, load2, load3, load4; in convolve_vert_4_dspr2() local
    152: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_4_dspr2()
    178: uint32_t load1, load2, load3, load4; in convolve_vert_64_dspr2() local
    301: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_64_dspr2()
|
D | convolve8_avg_dspr2.c |
    31: uint32_t load1, load2, load3, load4; in convolve_avg_vert_4_dspr2() local
    161: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_avg_vert_4_dspr2()
    187: uint32_t load1, load2, load3, load4; in convolve_avg_vert_64_dspr2() local
    318: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_avg_vert_64_dspr2()
|
D | itrans16_dspr2.c |
    25: int load1, load2, load3, load4, load5, load6, load7, load8; in idct16_rows_dspr2() local
    67: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_rows_dspr2()
    189: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_rows_dspr2()
    400: int load1, load2, load3, load4, load5, load6, load7, load8; in idct16_cols_add_blk_dspr2() local
    452: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_cols_add_blk_dspr2()
    574: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_cols_add_blk_dspr2()
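Note: the recurring `[load1] "=&r"(load1)` operands across these DSPr2 files are GCC extended-asm named outputs; "=&r" is an early-clobber register constraint, telling the compiler not to place a loaded word in a register that still holds an input operand. A minimal sketch of the idiom, assuming a pre-R6 MIPS32 target where ulw (unaligned load word) is available; the body is illustrative, not one of the actual kernels:

    #include <stdint.h>

    /* Load two possibly-unaligned 32-bit words from src.  The early-clobber
     * "=&r" keeps load1/load2 out of the register that carries src. */
    void load_two_words(const uint8_t *src, uint32_t *out1, uint32_t *out2) {
      uint32_t load1, load2;
      __asm__ __volatile__(
          "ulw %[load1], 0(%[src])    \n\t"
          "ulw %[load2], 4(%[src])    \n\t"
          : [load1] "=&r"(load1), [load2] "=&r"(load2)
          : [src] "r"(src)
          : "memory");
      *out1 = load1;
      *out2 = load2;
    }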
|
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | propagate_ir_flags.ll |
    18: %load1 = load i32, i32* %idx1, align 4
    23: %op1 = lshr exact i32 %load1, 1
    44: %load1 = load i32, i32* %idx1, align 4
    49: %op1 = lshr exact i32 %load1, 1
    70: %load1 = load i32, i32* %idx1, align 4
    75: %op1 = add nsw i32 %load1, 1
    96: %load1 = load i32, i32* %idx1, align 4
    101: %op1 = add nsw i32 %load1, 1
    122: %load1 = load i32, i32* %idx1, align 4
    127: %op1 = add nuw i32 %load1, 1
    [all …]
|
D | pr27163.ll |
    11: %load1 = load i64, i64* %gep1, align 8
    13: store i64 %load1, i64* %gep1, align 8
    27: %phi = phi i64 [ %load1, %catch.dispatch ], [ 9, %invoke.cont1 ]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | fold-rmw-ops.ll |
    31: %load1 = load i64, i64* @g64
    33: %add = add i64 %load1, 16777214
    64: %load1 = load i64, i64* @g64
    66: %add = add i64 %load1, -2147483648
    97: %load1 = load i64, i64* @g64
    100: %add = add i64 %load1, 2147483648
    132: %load1 = load i64, i64* @g64
    136: %add = add i64 %load1, 2147483648
    168: %load1 = load i64, i64* @g64
    171: %add = add i64 %load1, 2147483649
    [all …]
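Note: the immediates here bracket the signed 32-bit boundary deliberately: x86-64 can fold an add into a read-modify-write memory instruction only when the immediate fits in a sign-extended imm32, so 16777214 and -2147483648 fold while 2147483648 and 2147483649 must first be materialized in a register. A sketch of the two cases, reusing the test's @g64 global (function names hypothetical, CHECK lines omitted):

    @g64 = global i64 0

    define void @rmw_fold_ok() {
      %load1 = load i64, i64* @g64
      %add = add i64 %load1, -2147483648   ; fits sign-extended imm32: foldable
      store i64 %add, i64* @g64
      ret void
    }

    define void @rmw_no_fold() {
      %load1 = load i64, i64* @g64
      %add = add i64 %load1, 2147483648    ; does not fit imm32: needs a register
      store i64 %add, i64* @g64
      ret void
    }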
|
D | addr-mode-matcher.ll |
    24: ; %load1 = (load (and (shl %xor, 2), 1020))
    29: %load1 = load i32, i32* %tmp1704, align 4
    40: ; While matching xor we address-match %load1. The and-of-shift reassociation
    42: ; node becomes identical to %load2. CSE replaces %load1 which leaves its
    44: %tmp1711 = xor i32 %load1, %tmp1710
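Note: the matched comments describe the bug this test pins down: while address-matching the xor's operand, reassociating (shl %xor, 2) & 1020 produces a node identical to another load, and CSE then deletes %load1 out from under the address matcher (the matched comment is truncated here). A rough reconstruction of the address shape from the line-24 comment, with hypothetical names around the %tmp1704/%load1 pair shown above:

    define i32 @sketch(i8* %base, i32 %x) {
      %shl = shl i32 %x, 2
      %and = and i32 %shl, 1020          ; (x << 2) & 1020, as in the comment
      %idx = zext i32 %and to i64
      %addr = getelementptr i8, i8* %base, i64 %idx
      %tmp1704 = bitcast i8* %addr to i32*
      %load1 = load i32, i32* %tmp1704, align 4
      ret i32 %load1
    }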
|
/external/llvm/test/Transforms/LoadCombine/ |
D | load-combine-aa.ll |
    11: %load1 = load i32, i32* %a, align 4
    12: %conv = zext i32 %load1 to i64
    14: store i32 %load1, i32* %b, align 4
    29: %load1 = load i32, i32* %a, align 4
    30: %conv = zext i32 %load1 to i64
    32: store i32 %load1, i32* %b, align 4
|
D | load-combine-assume.ll |
    15: %load1 = load i32, i32* %a, align 4
    16: %conv = zext i32 %load1 to i64
    34: %load1 = load i32, i32* %a, align 4
    35: %conv = zext i32 %load1 to i64
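Note: both LoadCombine tests share one shape: two adjacent i32 loads that the pass can widen into a single i64 load, once alias analysis shows the intervening store does not clobber the second load (load-combine-aa.ll), or once it is shown that an intervening llvm.assume call is not a blocker (load-combine-assume.ll). A hedged sketch of the candidate pattern, names hypothetical:

    define i64 @combine_candidate(i32* %a) {
      %gep = getelementptr i32, i32* %a, i64 1
      %load1 = load i32, i32* %a, align 4
      %load2 = load i32, i32* %gep, align 4
      %conv = zext i32 %load1 to i64
      %conv2 = zext i32 %load2 to i64
      %shl = shl i64 %conv2, 32
      %or = or i64 %conv, %shl           ; foldable into one i64 load of %a
      ret i64 %or
    }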
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/AArch64/ |
D | free-zext.ll |
    11: %load1 = load i32, i32* %ptr, align 4
    23: %phi = phi i32 [ %load1, %bb1 ], [ %load2, %bb2 ]
    36: %load1 = load i32, i32* %ptr, align 4
    41: %trunc = trunc i32 %load1 to i16
    46: %shl = shl i32 %load1, 16
    53: %and = and i32 %load1, 65535
    65: %load1 = load i32, i32* %ptr, align 4
    70: %phi = phi i32 [ %load1, %bb1 ], [ %load2, %loop ]
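Note: free-zext.ll exercises CodeGenPrepare's "free zext" handling on AArch64, where a 32-bit load already zeroes bits 63:32 of the destination register, so a later zext of %load1 costs nothing, even when the value is also consumed through trunc/shl/and as in the matches above. A minimal sketch of the basic case (hypothetical function name):

    define i64 @free_zext_sketch(i32* %ptr) {
      %load1 = load i32, i32* %ptr, align 4
      %zext = zext i32 %load1 to i64   ; free: the w-form load cleared the top bits
      ret i64 %zext
    }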
|
/external/llvm/test/Transforms/CodeGenPrepare/AArch64/ |
D | free-zext.ll |
    11: %load1 = load i32, i32* %ptr, align 4
    23: %phi = phi i32 [ %load1, %bb1 ], [ %load2, %bb2 ]
    36: %load1 = load i32, i32* %ptr, align 4
    41: %trunc = trunc i32 %load1 to i16
    46: %shl = shl i32 %load1, 16
    53: %and = and i32 %load1, 65535
    65: %load1 = load i32, i32* %ptr, align 4
    70: %phi = phi i32 [ %load1, %bb1 ], [ %load2, %loop ]
|
/external/libaom/libaom/aom_dsp/mips/ |
D | convolve2_vert_dspr2.c |
    31: uint32_t load1, load2; in convolve_bi_vert_4_dspr2() local
    99: : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_4_dspr2()
    121: uint32_t load1, load2; in convolve_bi_vert_64_dspr2() local
    189: : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_64_dspr2()
|
D | convolve8_vert_dspr2.c |
    32: uint32_t load1, load2, load3, load4; in convolve_vert_4_dspr2() local
    154: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_4_dspr2()
    180: uint32_t load1, load2, load3, load4; in convolve_vert_64_dspr2() local
    303: : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_64_dspr2()
|
/external/llvm/test/CodeGen/X86/ |
D | addr-mode-matcher.ll |
    24: ; %load1 = (load (and (shl %xor, 2), 1020))
    29: %load1 = load i32, i32* %tmp1704, align 4
    40: ; While matching xor we address-match %load1. The and-of-shift reassociation
    42: ; node becomes identical to %load2. CSE replaces %load1 which leaves its
    44: %tmp1711 = xor i32 %load1, %tmp1710
|
/external/llvm/test/CodeGen/AArch64/ |
D | ldst-opt.ll |
    291: %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
    292: %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
    309: %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
    310: %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
    327: %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
    328: %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
    345: %load1 = load %pre.struct.float*, %pre.struct.float** %this
    346: %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
    363: %load1 = load %pre.struct.double*, %pre.struct.double** %this
    364: %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
    [all …]
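Note: each pair of matches loads a base pointer and then addresses field 1 of the pointee; that base-plus-constant-offset access is the shape the AArch64 load/store optimizer folds into pre/post-indexed instructions. A sketch of one pair, assuming a simple two-field struct body and a store at %gep1 (the test's actual struct definitions and stores are not shown in the matches):

    %pre.struct.i32 = type { i32, i32 }

    define void @store_field(%pre.struct.i32** %this, i32 %val) {
      %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
      %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
      store i32 %val, i32* %gep1, align 4
      ret void
    }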
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/ |
D | ldst-opt.ll |
    292: %load1 = load %pre.struct.i32*, %pre.struct.i32** %this
    293: %gep1 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load1, i64 0, i32 1
    310: %load1 = load %pre.struct.i64*, %pre.struct.i64** %this
    311: %gep1 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load1, i64 0, i32 1
    328: %load1 = load %pre.struct.i128*, %pre.struct.i128** %this
    329: %gep1 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load1, i64 0, i32 1
    346: %load1 = load %pre.struct.float*, %pre.struct.float** %this
    347: %gep1 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load1, i64 0, i32 1
    364: %load1 = load %pre.struct.double*, %pre.struct.double** %this
    365: %gep1 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load1, i64 0, i32 1
    [all …]
|
/external/llvm/test/CodeGen/AMDGPU/ |
D | vselect.ll |
    16: %load1 = load <2 x i32>, <2 x i32> addrspace(1)* %in1
    17: %cmp = icmp sgt <2 x i32> %load0, %load1
    58: %load1 = load <4 x i32>, <4 x i32> addrspace(1)* %in1
    59: %cmp = icmp sgt <4 x i32> %load0, %load1
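Note: the vselect tests compare two loaded vectors and pick lanes from each, which should lower to a vector-wide select rather than scalarized branches. A sketch completing the <2 x i32> case around the matched lines (the select, store, and output pointer are assumptions inferred from the file name; only the loads and compare appear in the matches):

    define void @vselect_sketch(<2 x i32> addrspace(1)* %out,
                                <2 x i32> addrspace(1)* %in0,
                                <2 x i32> addrspace(1)* %in1) {
      %load0 = load <2 x i32>, <2 x i32> addrspace(1)* %in0
      %load1 = load <2 x i32>, <2 x i32> addrspace(1)* %in1
      %cmp = icmp sgt <2 x i32> %load0, %load1
      %sel = select <2 x i1> %cmp, <2 x i32> %load0, <2 x i32> %load1
      store <2 x i32> %sel, <2 x i32> addrspace(1)* %out
      ret void
    }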
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AMDGPU/ |
D | vselect.ll |
    18: %load1 = load <2 x i32>, <2 x i32> addrspace(1)* %in1
    19: %cmp = icmp sgt <2 x i32> %load0, %load1
    61: %load1 = load <4 x i32>, <4 x i32> addrspace(1)* %in1
    62: %cmp = icmp sgt <4 x i32> %load0, %load1
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/AMDGPU/ |
D | horizontal-store.ll |
    37: …%load1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align…
    39: %cmp1 = icmp sgt i32 %load1, %load2
    40: %select1 = select i1 %cmp1, i32 %load1, i32 %load2
    84: …%load1 = load i64, i64* getelementptr inbounds ([32 x i64], [32 x i64]* @arr64, i64 0, i64 0), ali…
    86: %cmp1 = icmp slt i64 %load1, %load2
    87: %select1 = select i1 %cmp1, i64 %load1, i64 %load2
    131: …%load1 = load float, float* getelementptr inbounds ([32 x float], [32 x float]* @farr, i64 0, i64 …
    133: %cmp1 = fcmp fast ogt float %load1, %load2
    134: %select1 = select i1 %cmp1, float %load1, float %load2
    178: …%load1 = load double, double* getelementptr inbounds ([32 x double], [32 x double]* @darr, i64 0, …
    [all …]
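Note: each group of matches is the head of a cmp+select chain over a 32-element global array, the pattern the SLP vectorizer recognizes as a horizontal min/max reduction feeding a store. A sketch of the first link of the signed-max chain (the real test continues through all 32 elements; the align 16 on the global is an assumption, since the matched lines are truncated):

    @arr = global [32 x i32] zeroinitializer, align 16

    define i32 @smax_chain_head() {
      %load1 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 0), align 16
      %load2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align 4
      %cmp1 = icmp sgt i32 %load1, %load2
      %select1 = select i1 %cmp1, i32 %load1, i32 %load2   ; smax(arr[0], arr[1])
      ret i32 %select1
    }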
|