/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans32_dspr2.c |
     36   int load1, load2, load3, load4;  in idct32_rows_dspr2() local
    147   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    207   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    267   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    323   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    379   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    435   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    661   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    720   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    874   [load2] "=&r"(load2), [temp2] "=&r"(temp2), [load3] "=&r"(load3),  in idct32_rows_dspr2()
    [all …]
|
D | itrans32_cols_dspr2.c |
     32   int load1, load2, load3, load4;  in vpx_idct32_cols_add_blk_dspr2() local
    103   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    163   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    223   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    279   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    335   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    391   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    617   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
    676   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in vpx_idct32_cols_add_blk_dspr2()
|
D | convolve8_vert_dspr2.c |
     30   uint32_t load1, load2, load3, load4;  in convolve_vert_4_dspr2() local
    152   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in convolve_vert_4_dspr2()
    178   uint32_t load1, load2, load3, load4;  in convolve_vert_64_dspr2() local
    301   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in convolve_vert_64_dspr2()
|
D | convolve8_avg_dspr2.c |
     31   uint32_t load1, load2, load3, load4;  in convolve_avg_vert_4_dspr2() local
    161   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in convolve_avg_vert_4_dspr2()
    187   uint32_t load1, load2, load3, load4;  in convolve_avg_vert_64_dspr2() local
    318   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in convolve_avg_vert_64_dspr2()
|
D | itrans16_dspr2.c |
     25   int load1, load2, load3, load4, load5, load6, load7, load8;  in idct16_rows_dspr2() local
     67   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct16_rows_dspr2()
    189   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct16_rows_dspr2()
    400   int load1, load2, load3, load4, load5, load6, load7, load8;  in idct16_cols_add_blk_dspr2() local
    452   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct16_cols_add_blk_dspr2()
    574   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in idct16_cols_add_blk_dspr2()
|
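All of the libvpx (and, below, libaom) DSPr2 hits share one pattern: load1..load4 are scratch variables bound to early-clobber register outputs of a GCC extended-asm block, so the compiler keeps them out of the registers holding the inputs while the asm body interleaves loads with multiply-accumulate work. A minimal sketch of that constraint pattern follows (illustrative only and MIPS-specific; the real asm bodies are far longer and use DSPr2 instructions):

    #include <stdint.h>

    static int32_t sum_two_halfwords(const int16_t *input) {
      int load1, load2, result;
      __asm__ __volatile__(
          "lh    %[load1], 0(%[input])          \n\t" /* first 16-bit value  */
          "lh    %[load2], 2(%[input])          \n\t" /* second 16-bit value */
          "addu  %[result], %[load1], %[load2]  \n\t"
          /* "=&r" = write-only, early-clobber: load1/load2 are written
             before the last use of %[input], so they must not share its
             register. This is the constraint list repeated in the hits
             above. */
          : [load1] "=&r"(load1), [load2] "=&r"(load2), [result] "=r"(result)
          : [input] "r"(input)
          : "memory");
      return result;
    }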
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | propagate_ir_flags.ll |
     20   %load3 = load i32, i32* %idx3, align 4
     25   %op3 = lshr exact i32 %load3, 1
     46   %load3 = load i32, i32* %idx3, align 4
     51   %op3 = lshr exact i32 %load3, 1
     72   %load3 = load i32, i32* %idx3, align 4
     77   %op3 = add nsw i32 %load3, 1
     98   %load3 = load i32, i32* %idx3, align 4
    103   %op3 = add nsw i32 %load3, 1
    124   %load3 = load i32, i32* %idx3, align 4
    129   %op3 = add nuw i32 %load3, 1
    [all …]
|
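These tests load four scalars, apply ops carrying poison-generating flags (lshr exact, add nsw, add nuw), and check that SLP keeps a flag on the vectorized op only when every scalar op carried it. A small C model of that intersection rule (a sketch of the rule under test, not the actual SLPVectorizer code):

    #include <stdbool.h>

    struct IRFlags {
      bool nsw;   /* no signed wrap                */
      bool nuw;   /* no unsigned wrap              */
      bool exact; /* division/shift loses no bits  */
    };

    /* When N scalar ops are fused into one vector op, a flag may be
       kept only if every scalar op carried it. */
    static struct IRFlags intersect_flags(const struct IRFlags *ops, int n) {
      struct IRFlags out = {true, true, true};
      for (int i = 0; i < n; ++i) {
        out.nsw &= ops[i].nsw;
        out.nuw &= ops[i].nuw;
        out.exact &= ops[i].exact;
      }
      return out;
    }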
/external/libaom/libaom/aom_dsp/mips/ |
D | convolve8_vert_dspr2.c |
     32   uint32_t load1, load2, load3, load4;  in convolve_vert_4_dspr2() local
    154   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in convolve_vert_4_dspr2()
    180   uint32_t load1, load2, load3, load4;  in convolve_vert_64_dspr2() local
    303   : [load1] "=&r"(load1), [load2] "=&r"(load2), [load3] "=&r"(load3),  in convolve_vert_64_dspr2()
|
/external/pdfium/third_party/libpng16/intel/ |
D | filter_sse2_intrinsics.c |
     41   static __m128i load3(const void* p) {  in load3() function
     75   a = d; d = load3(row);  in png_read_filter_row_sub3_sse2()
    147   b = load3(prev);  in png_read_filter_row_avg3_sse2()
    148   a = d; d = load3(row );  in png_read_filter_row_avg3_sse2()
    295   c = b; b = _mm_unpacklo_epi8(load3(prev), zero);  in png_read_filter_row_paeth3_sse2()
    296   a = d; d = _mm_unpacklo_epi8(load3(row ), zero);  in png_read_filter_row_paeth3_sse2()
|
/external/libpng/intel/ |
D | filter_sse2_intrinsics.c |
     41   static __m128i load3(const void* p) {  in load3() function
     75   a = d; d = load3(row);  in png_read_filter_row_sub3_sse2()
    147   b = load3(prev);  in png_read_filter_row_avg3_sse2()
    148   a = d; d = load3(row );  in png_read_filter_row_avg3_sse2()
    295   c = b; b = _mm_unpacklo_epi8(load3(prev), zero);  in png_read_filter_row_paeth3_sse2()
    296   a = d; d = _mm_unpacklo_epi8(load3(row ), zero);  in png_read_filter_row_paeth3_sse2()
|
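Both copies of filter_sse2_intrinsics.c define load3() to bring one 3-byte RGB pixel into the low lane of an XMM register. A sketch of such a helper (assuming the usual memcpy approach; the shipped libpng code may differ in detail):

    #include <emmintrin.h> /* SSE2 */
    #include <stdint.h>
    #include <string.h>

    static __m128i load3(const void *p) {
      /* Copy exactly 3 bytes so a pixel at the end of a row never reads
         past the buffer, then move R,G,B (byte 3 stays 0) into XMM. */
      uint32_t tmp = 0;
      memcpy(&tmp, p, 3);
      return _mm_cvtsi32_si128((int)tmp);
    }

The filters then widen the result to 16-bit lanes, e.g. _mm_unpacklo_epi8(load3(row), zero) in the paeth3 hits above.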
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/AMDGPU/ |
D | horizontal-store.ll |
     42   …%load3 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 2), align…
     43   %cmp2 = icmp sgt i32 %select1, %load3
     44   %select2 = select i1 %cmp2, i32 %select1, i32 %load3
     89   …%load3 = load i64, i64* getelementptr inbounds ([32 x i64], [32 x i64]* @arr64, i64 0, i64 2), ali…
     90   %cmp2 = icmp slt i64 %select1, %load3
     91   %select2 = select i1 %cmp2, i64 %select1, i64 %load3
    140   …%load3 = load float, float* getelementptr inbounds ([32 x float], [32 x float]* @farr, i64 0, i64 …
    141   %cmp2 = fcmp fast ogt float %select1, %load3
    142   %select2 = select i1 %cmp2, float %select1, float %load3
    191   …%load3 = load double, double* getelementptr inbounds ([32 x double], [32 x double]* @darr, i64 0, …
    [all …]
|
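Each function in horizontal-store.ll builds a running min/max over a global array as a chain of compare+select pairs; SLP is expected to recognize the chain as a horizontal reduction and vectorize it. The scalar C shape that produces such chains (illustrative):

    /* Each ternary maps to one icmp sgt + select pair in the IR;
       arr[2] corresponds to the %load3 / %cmp2 / %select2 hits above. */
    static int horizontal_max4(const int arr[4]) {
      int m = arr[0];
      m = (m > arr[1]) ? m : arr[1];
      m = (m > arr[2]) ? m : arr[2];
      m = (m > arr[3]) ? m : arr[3];
      return m;
    }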
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | propagate_ir_flags.ll |
     31   %load3 = load i32, i32* %idx3, align 4
     36   %op3 = lshr exact i32 %load3, 1
     67   %load3 = load i32, i32* %idx3, align 4
     72   %op3 = lshr exact i32 %load3, 1
    103   %load3 = load i32, i32* %idx3, align 4
    108   %op3 = add nsw i32 %load3, 1
    139   %load3 = load i32, i32* %idx3, align 4
    144   %op3 = add nsw i32 %load3, 1
    175   %load3 = load i32, i32* %idx3, align 4
    180   %op3 = add nuw i32 %load3, 1
    [all …]
|
/external/llvm-project/llvm/test/Analysis/MemorySSA/ |
D | pr36883.ll |
     22   %load3 = call <8 x i16> @llvm.arm.neon.vld1.v8i16.p0i8(i8* %p, i32 2) #4 ; load CSE removed
     25   %ret = add <8 x i16> %add, %load3
|
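Reading the "; load CSE removed" comment as marking the duplicate load that CSE is expected to delete: the test has identical NEON vld1 calls on the same pointer with nothing writing it in between, and MemorySSA must give them the same defining access so the repeat can fold away. A C-with-intrinsics sketch of that situation (illustrative, ARM/AArch64 only; the test's IR calls llvm.arm.neon.vld1 directly):

    #include <arm_neon.h>

    int16x8_t double_load(const int16_t *p) {
      int16x8_t a = vld1q_s16(p); /* first load                       */
      int16x8_t b = vld1q_s16(p); /* redundant once vld1 is known not
                                     to write memory: CSE target      */
      return vaddq_s16(a, b);
    }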
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | promote-constOffset-to-imm.ll |
     51   %load3 = load i64, i64 addrspace(1)* %add.ptr8.2, align 8
     52   %add.2 = add i64 %load3, %add.1
    260   %load3 = load i32, i32 addrspace(1)* %add.ptr8.2, align 4
    261   %add.2 = add i32 %load3, %add.1
    328   %load3 = load i64, i64 addrspace(1)* %addr3, align 8
    330   %add2 = add i64 %load3, %add1
    374   %load3 = load i32, i32 addrspace(1)* %addr3, align 8
    376   %add2 = add i32 %load3, %add1
    426   %load3 = load i64, i64 addrspace(1)* %add.ptr8.4, align 8
    427   %add2 = add i64 %load3, %add1
    [all …]
|
D | function-returns.ll |
    612   %load3 = load volatile i32, i32 addrspace(3)* undef
    618   %insert.4 = insertvalue { <3 x i32>, i32 } %insert.3, i32 %load3, 1
    631   %load3 = load volatile i32, i32 addrspace(3)* undef
    637   %insert.4 = insertvalue { <3 x float>, i32 } %insert.3, i32 %load3, 1
|
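promote-constOffset-to-imm.ll repeatedly loads through addresses that differ from a common base only by constants (%add.ptr8.2 and friends), checking that the AMDGPU backend folds those constants into the load's immediate-offset field instead of materializing each address separately. The C shape of that access pattern (illustrative offsets, not the test's values):

    #include <stdint.h>

    static uint64_t sum_const_offsets(const uint64_t *base) {
      uint64_t add0 = base[0];
      uint64_t add1 = add0 + base[256]; /* base + constant offset        */
      uint64_t add2 = add1 + base[512]; /* third load, the %load3 role   */
      return add2;
    }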
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/AArch64/ |
D | ext-trunc.ll |
     53   %load3 = load i64, i64* %gep3
     54   call void @foo(i64 %load0, i64 %load1, i64 %load2, i64 %load3)
    110   %load3 = load i64, i64* %gep3
    111   call void @foo(i64 %load0, i64 %load1, i64 %load2, i64 %load3)
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | falkor-hwpf-fix.ll |
     31   %load3 = load i32, i32* %gep3
     36   %add2 = add i32 %load3, %load4
|
D | f16-convert.ll |
     38   define double @load3(i16* nocapture readonly %a, i32 %i) nounwind {
     39   ; CHECK-LABEL: load3:
|
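In f16-convert.ll, @load3 is the indexed-load variant: fetch a half-precision value at a[i] and widen it to double, which should lower to a single ldr of an h register followed by fcvt. A C-level sketch (assuming __fp16, the storage-only half type on AArch64 Clang/GCC):

    static double load3(const __fp16 *a, int i) {
      return (double)a[i]; /* one halfword load, then widening convert */
    }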
/external/llvm-project/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/ |
D | gep-bitcast.ll |
     24   %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
     53   %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
     79   %load3 = load i32, i32 addrspace(1)* %f2i3, align 4
|
D | adjust-alloca-alignment.ll |
    186   %load3 = load i32, i32 addrspace(5)* %out.gep.3, align 1
    206   %load3 = load i8, i8 addrspace(5)* %out.gep.3, align 1
|
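In gep-bitcast.ll the i32 loads (%load3 via %f2i3) reach memory through a float* bitcast to i32*; the LoadStoreVectorizer should still see four adjacent words and merge them into one <4 x i32> load. A defined-behavior C analogue of that reinterpret-and-load pattern (illustrative):

    #include <stdint.h>
    #include <string.h>

    /* Reads 16 bytes, so f must point at four or more floats. */
    static uint32_t sum_reinterpreted(const float *f) {
      uint32_t w[4];
      memcpy(w, f, sizeof w);           /* reinterpret float bits as words */
      return w[0] + w[1] + w[2] + w[3]; /* w[3] plays the %load3 role      */
    }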
/external/llvm-project/llvm/test/Transforms/Inline/ |
D | inlined-loop-metadata.ll |
     80   %wide.load3 = load <4 x i32>, <4 x i32>* %14, align 4, !dbg !19, !tbaa !20
     83   %16 = mul <4 x i32> %wide.load3, %step.add5, !dbg !19
|
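The %wide.load3 name is LoopVectorize's label for a widened <4 x i32> load; the test checks that the !dbg/!tbaa and loop metadata attached to such loads survive inlining. A loop of this shape (illustrative, not the test's source) is what produces wide.load values:

    /* Vectorizes to <4 x i32> wide loads of src, a vector multiply,
       and wide stores to dst. */
    static void scale(int *dst, const int *src, int n, int k) {
      for (int i = 0; i < n; ++i)
        dst[i] = src[i] * k;
    }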
/external/llvm/test/CodeGen/AArch64/ |
D | f16-convert.ll |
     38   define double @load3(i16* nocapture readonly %a, i32 %i) nounwind {
     39   ; CHECK-LABEL: load3:
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | hhvm-cc.ll |
    224   %load3 = load i64, i64 *%t3
    225   call hhvm_ccc void @stack_helper(i64 %c, i64 %load3, i64 42)
|
D | dag-update-nodetomatch.ll |
     33   …%bf.load3 = load i32, i32* getelementptr inbounds (%struct.i, %struct.i* @l, i64 0, i32 0), align 4
     34   %bf.shl = shl i32 %bf.load3, 7
|
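The hhvm-cc.ll hits exercise the HHVM calling conventions (hhvm_ccc), which have no portable C spelling. The dag-update-nodetomatch.ll pair, though, is ordinary bitfield lowering: %bf.load3 loads the word containing the field and %bf.shl left-aligns it before an arithmetic shift sign-extends it. A struct of roughly that shape (a guess at the layout; the test's %struct.i may differ):

    /* shl by 7 then ashr by 23 extracts a signed 9-bit field starting at
       bit 16 of a 32-bit word: 32 - 16 - 9 = 7, and 32 - 9 = 23. */
    struct i {
      unsigned pad : 16;
      int field : 9;
    };

    static int read_field(const struct i *p) {
      return p->field; /* emits %bf.load, %bf.shl, then an ashr */
    }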
/external/llvm/test/CodeGen/X86/ |
D | hhvm-cc.ll |
    224   %load3 = load i64, i64 *%t3
    225   call hhvm_ccc void @stack_helper(i64 %c, i64 %load3, i64 42)
|
/external/llvm/test/Analysis/ValueTracking/ |
D | memory-dereferenceable.ll |
     38   %load3 = load i32, i32 addrspace(1)* %dparam
|