
Searched refs:load2 (Results 1 – 25 of 75) sorted by relevance


/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/
propagate_ir_flags.ll
19 %load2 = load i32, i32* %idx2, align 4
24 %op2 = lshr exact i32 %load2, 1
45 %load2 = load i32, i32* %idx2, align 4
50 %op2 = lshr i32 %load2, 1
71 %load2 = load i32, i32* %idx2, align 4
76 %op2 = add nsw i32 %load2, 1
97 %load2 = load i32, i32* %idx2, align 4
102 %op2 = add nsw i32 %load2, 1
123 %load2 = load i32, i32* %idx2, align 4
128 %op2 = add nuw i32 %load2, 1
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
itrans32_dspr2.c
36 int load1, load2, load3, load4; in idct32_rows_dspr2() local
147 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
207 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
267 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
323 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
379 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
435 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
661 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
720 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct32_rows_dspr2()
874 [load2] "=&r"(load2), [temp2] "=&r"(temp2), [load3] "=&r"(load3), in idct32_rows_dspr2()
[all …]
itrans32_cols_dspr2.c
32 int load1, load2, load3, load4; in vpx_idct32_cols_add_blk_dspr2() local
103 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
163 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
223 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
279 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
335 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
391 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
617 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
676 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in vpx_idct32_cols_add_blk_dspr2()
convolve2_vert_dspr2.c
30 uint32_t load1, load2; in convolve_bi_vert_4_dspr2() local
98 : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_4_dspr2()
120 uint32_t load1, load2; in convolve_bi_vert_64_dspr2() local
188 : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_64_dspr2()
convolve2_avg_dspr2.c
30 uint32_t load1, load2; in convolve_bi_avg_vert_4_dspr2() local
105 : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_avg_vert_4_dspr2()
129 uint32_t load1, load2; in convolve_bi_avg_vert_64_dspr2() local
205 : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_avg_vert_64_dspr2()
convolve8_vert_dspr2.c
30 uint32_t load1, load2, load3, load4; in convolve_vert_4_dspr2() local
152 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_4_dspr2()
178 uint32_t load1, load2, load3, load4; in convolve_vert_64_dspr2() local
301 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_64_dspr2()
convolve8_avg_dspr2.c
31 uint32_t load1, load2, load3, load4; in convolve_avg_vert_4_dspr2() local
161 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_avg_vert_4_dspr2()
187 uint32_t load1, load2, load3, load4; in convolve_avg_vert_64_dspr2() local
318 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_avg_vert_64_dspr2()
itrans16_dspr2.c
25 int load1, load2, load3, load4, load5, load6, load7, load8; in idct16_rows_dspr2() local
67 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_rows_dspr2()
189 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_rows_dspr2()
400 int load1, load2, load3, load4, load5, load6, load7, load8; in idct16_cols_add_blk_dspr2() local
452 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_cols_add_blk_dspr2()
574 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in idct16_cols_add_blk_dspr2()
/external/llvm/test/Transforms/SLPVectorizer/X86/
propagate_ir_flags.ll
19 %load2 = load i32, i32* %idx2, align 4
24 %op2 = lshr exact i32 %load2, 1
45 %load2 = load i32, i32* %idx2, align 4
50 %op2 = lshr i32 %load2, 1
71 %load2 = load i32, i32* %idx2, align 4
76 %op2 = add nsw i32 %load2, 1
97 %load2 = load i32, i32* %idx2, align 4
102 %op2 = add nsw i32 %load2, 1
123 %load2 = load i32, i32* %idx2, align 4
128 %op2 = add nuw i32 %load2, 1
[all …]
/external/libaom/libaom/aom_dsp/mips/
convolve2_vert_dspr2.c
31 uint32_t load1, load2; in convolve_bi_vert_4_dspr2() local
99 : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_4_dspr2()
121 uint32_t load1, load2; in convolve_bi_vert_64_dspr2() local
189 : [load1] "=&r"(load1), [load2] "=&r"(load2), [p1] "=&r"(p1), in convolve_bi_vert_64_dspr2()
convolve8_vert_dspr2.c
32 uint32_t load1, load2, load3, load4; in convolve_vert_4_dspr2() local
154 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_4_dspr2()
180 uint32_t load1, load2, load3, load4; in convolve_vert_64_dspr2() local
303 : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3), in convolve_vert_64_dspr2()
/external/llvm/test/CodeGen/AArch64/
ldst-opt.ll
286 %pre.struct.i32* %load2) nounwind {
295 %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
304 %pre.struct.i64* %load2) nounwind {
313 %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
322 %pre.struct.i128* %load2) nounwind {
331 %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
340 %pre.struct.float* %load2) nounwind {
349 %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
358 %pre.struct.double* %load2) nounwind {
367 %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/AArch64/
ldst-opt.ll
287 %pre.struct.i32* %load2) nounwind {
296 %gep2 = getelementptr inbounds %pre.struct.i32, %pre.struct.i32* %load2, i64 0, i32 2
305 %pre.struct.i64* %load2) nounwind {
314 %gep2 = getelementptr inbounds %pre.struct.i64, %pre.struct.i64* %load2, i64 0, i32 2
323 %pre.struct.i128* %load2) nounwind {
332 %gep2 = getelementptr inbounds %pre.struct.i128, %pre.struct.i128* %load2, i64 0, i32 2
341 %pre.struct.float* %load2) nounwind {
350 %gep2 = getelementptr inbounds %pre.struct.float, %pre.struct.float* %load2, i64 0, i32 2
359 %pre.struct.double* %load2) nounwind {
368 %gep2 = getelementptr inbounds %pre.struct.double, %pre.struct.double* %load2, i64 0, i32 2
[all …]
/external/llvm/test/Transforms/LoadCombine/
load-combine-aa.ll
15 %load2 = load i32, i32* %arrayidx1, align 4
16 %conv2 = zext i32 %load2 to i64
33 %load2 = load i32, i32* %arrayidx1, align 4
34 %conv2 = zext i32 %load2 to i64
load-combine-assume.ll
18 %load2 = load i32, i32* %arrayidx1, align 4
20 %conv2 = zext i32 %load2 to i64
38 %load2 = load i32, i32* %arrayidx1, align 4
39 %conv2 = zext i32 %load2 to i64
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/
addr-mode-matcher.ll
31 ; %load2 = (load (shl (and %xor, 255), 2))
36 %load2 = load i32, i32* %tmp1708, align 4
38 %tmp1710 = or i32 %load2, %a
42 ; node becomes identical to %load2. CSE replaces %load1 which leaves its
pr32420.ll
26 %load2 = load i16, i16* @a, align 4
27 %shl3 = shl i16 %load2, 12
36 %cast1629 = zext i16 %load2 to i32
/external/llvm/test/CodeGen/X86/
addr-mode-matcher.ll
31 ; %load2 = (load (shl (and %xor, 255), 2))
36 %load2 = load i32, i32* %tmp1708, align 4
38 %tmp1710 = or i32 %load2, %a
42 ; node becomes identical to %load2. CSE replaces %load1 which leaves its
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/CodeGenPrepare/AArch64/
free-zext.ll
18 %load2 = load i32, i32* %ptr2, align 4
23 %phi = phi i32 [ %load1, %bb1 ], [ %load2, %bb2 ]
70 %phi = phi i32 [ %load1, %bb1 ], [ %load2, %loop ]
77 %load2 = load i32, i32* %addr, align 4
/external/llvm/test/Transforms/CodeGenPrepare/AArch64/
free-zext.ll
18 %load2 = load i32, i32* %ptr2, align 4
23 %phi = phi i32 [ %load1, %bb1 ], [ %load2, %bb2 ]
70 %phi = phi i32 [ %load1, %bb1 ], [ %load2, %loop ]
77 %load2 = load i32, i32* %addr, align 4
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/SPARC/
select-mask.ll
13 %bf.load2 = load i8, i8* %this, align 4
14 %bf.cast5 = trunc i8 %bf.load2 to i1
/external/llvm/test/CodeGen/SPARC/
select-mask.ll
13 %bf.load2 = load i8, i8* %this, align 4
14 %bf.cast5 = trunc i8 %bf.load2 to i1
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/ARM/
neon_cmp.ll
8 %wide.load2 = load <2 x double>, <2 x double>* %b, align 4
11 %v1 = fcmp olt <2 x double> %wide.load, %wide.load2
/external/llvm/test/CodeGen/ARM/
neon_cmp.ll
8 %wide.load2 = load <2 x double>, <2 x double>* %b, align 4
11 %v1 = fcmp olt <2 x double> %wide.load, %wide.load2
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/AMDGPU/
horizontal-store.ll
38 …%load2 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 1), align…
39 %cmp1 = icmp sgt i32 %load1, %load2
40 %select1 = select i1 %cmp1, i32 %load1, i32 %load2
85 …%load2 = load i64, i64* getelementptr inbounds ([32 x i64], [32 x i64]* @arr64, i64 0, i64 1), ali…
86 %cmp1 = icmp slt i64 %load1, %load2
87 %select1 = select i1 %cmp1, i64 %load1, i64 %load2
132 …%load2 = load float, float* getelementptr inbounds ([32 x float], [32 x float]* @farr, i64 0, i64 …
133 %cmp1 = fcmp fast ogt float %load1, %load2
134 %select1 = select i1 %cmp1, float %load1, float %load2
179 …%load2 = load double, double* getelementptr inbounds ([32 x double], [32 x double]* @darr, i64 0, …
[all …]
