/external/libvpx/libvpx/vpx_dsp/mips/ |
D | itrans32_dspr2.c |
     36  int load1, load2, load3, load4;  in idct32_rows_dspr2() local
    148  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    208  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    268  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    324  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    380  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    436  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    662  [load4] "=&r"(load4), [result1] "=&r"(result1),  in idct32_rows_dspr2()
    721  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in idct32_rows_dspr2()
    875  [temp3] "=&r"(temp3), [load4] "=&r"(load4)  in idct32_rows_dspr2()
    [all …]
|
D | itrans32_cols_dspr2.c |
     32  int load1, load2, load3, load4;  in vpx_idct32_cols_add_blk_dspr2() local
    104  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
    164  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
    224  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
    280  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
    336  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
    392  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
    618  [load4] "=&r"(load4), [result1] "=&r"(result1),  in vpx_idct32_cols_add_blk_dspr2()
    677  [load4] "=&r"(load4), [temp0] "=&r"(temp0), [temp1] "=&r"(temp1),  in vpx_idct32_cols_add_blk_dspr2()
|
D | convolve8_vert_dspr2.c |
     30  uint32_t load1, load2, load3, load4;  in convolve_vert_4_dspr2() local
    153  [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),  in convolve_vert_4_dspr2()
    178  uint32_t load1, load2, load3, load4;  in convolve_vert_64_dspr2() local
    302  [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),  in convolve_vert_64_dspr2()
|
D | convolve8_avg_dspr2.c |
     31  uint32_t load1, load2, load3, load4;  in convolve_avg_vert_4_dspr2() local
    162  [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),  in convolve_avg_vert_4_dspr2()
    187  uint32_t load1, load2, load3, load4;  in convolve_avg_vert_64_dspr2() local
    319  [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),  in convolve_avg_vert_64_dspr2()
|
D | itrans16_dspr2.c |
     25  int load1, load2, load3, load4, load5, load6, load7, load8;  in idct16_rows_dspr2() local
     68  [load4] "=&r"(load4), [result1] "=&r"(result1),  in idct16_rows_dspr2()
    190  [load4] "=&r"(load4), [result1] "=&r"(result1),  in idct16_rows_dspr2()
    400  int load1, load2, load3, load4, load5, load6, load7, load8;  in idct16_cols_add_blk_dspr2() local
    453  [load4] "=&r"(load4), [result1] "=&r"(result1),  in idct16_cols_add_blk_dspr2()
    575  [load4] "=&r"(load4), [result1] "=&r"(result1),  in idct16_cols_add_blk_dspr2()
|
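Note: the recurring [load4] "=&r"(load4) strings in the DSPr2 matches above are GCC extended-asm operand constraints; "=&r" marks an early-clobber register output. A minimal hypothetical sketch of the pattern with plain MIPS lw loads (not the actual libvpx idct/convolve asm bodies):

    #include <stdint.h>

    /* Hypothetical sketch: four 32-bit loads through GCC extended asm.
     * "=&r" makes each loadN an early-clobber register output, so the
     * compiler keeps it in a register distinct from the input pointer
     * for the entire asm block. */
    static inline void load4x32(const uint32_t *p, uint32_t *out) {
      uint32_t load1, load2, load3, load4;
      __asm__ __volatile__(
          "lw   %[load1],  0(%[p])   \n\t"
          "lw   %[load2],  4(%[p])   \n\t"
          "lw   %[load3],  8(%[p])   \n\t"
          "lw   %[load4], 12(%[p])   \n\t"
          : [load1] "=&r"(load1), [load2] "=&r"(load2),
            [load3] "=&r"(load3), [load4] "=&r"(load4)
          : [p] "r"(p)
          : "memory" /* conservative: the asm reads memory */);
      out[0] = load1; out[1] = load2; out[2] = load3; out[3] = load4;
    }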
/external/llvm/test/Transforms/SLPVectorizer/X86/ |
D | propagate_ir_flags.ll |
     21  %load4 = load i32, i32* %idx4, align 4
     26  %op4 = lshr exact i32 %load4, 1
     47  %load4 = load i32, i32* %idx4, align 4
     52  %op4 = lshr exact i32 %load4, 1
     73  %load4 = load i32, i32* %idx4, align 4
     78  %op4 = add nsw i32 %load4, 1
     99  %load4 = load i32, i32* %idx4, align 4
    104  %op4 = add i32 %load4, 1
    125  %load4 = load i32, i32* %idx4, align 4
    130  %op4 = add nuw i32 %load4, 1
    [all …]
|
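Note: this test (duplicated in the /external/llvm-project copy below) checks that the SLP vectorizer keeps a flag such as exact, nsw, or nuw on the fused vector op only when all four scalar ops carried it. A hypothetical C analogue of the scalar shape being vectorized:

    /* Hypothetical C analogue of the IR in propagate_ir_flags.ll:
     * four adjacent loads feeding identical scalar ops, fusable by
     * SLP into one <4 x i32> operation. Clang tags signed int '+'
     * with nsw; the vector add keeps nsw only if every lane had it. */
    void op4(int *out, const int *in) {
      out[0] = in[0] + 1;
      out[1] = in[1] + 1;
      out[2] = in[2] + 1;
      out[3] = in[3] + 1;
    }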
/external/pdfium/third_party/libpng16/intel/ |
D | filter_sse2_intrinsics.c |
     30  static __m128i load4(const void* p) {  in load4() function
     67  a = d; d = load4(row);  in png_read_filter_row_sub3_sse2()
    100  a = d; d = load4(row);  in png_read_filter_row_sub4_sse2()
    130  b = load4(prev);  in png_read_filter_row_avg3_sse2()
    131  a = d; d = load4(row );  in png_read_filter_row_avg3_sse2()
    183  b = load4(prev);  in png_read_filter_row_avg4_sse2()
    184  a = d; d = load4(row );  in png_read_filter_row_avg4_sse2()
    258  c = b; b = _mm_unpacklo_epi8(load4(prev), zero);  in png_read_filter_row_paeth3_sse2()
    259  a = d; d = _mm_unpacklo_epi8(load4(row ), zero);  in png_read_filter_row_paeth3_sse2()
    357  c = b; b = _mm_unpacklo_epi8(load4(prev), zero);  in png_read_filter_row_paeth3_sse2()
    [all …]
|
/external/libpng/intel/ |
D | filter_sse2_intrinsics.c |
     30  static __m128i load4(const void* p) {  in load4() function
     67  a = d; d = load4(row);  in png_read_filter_row_sub3_sse2()
    100  a = d; d = load4(row);  in png_read_filter_row_sub4_sse2()
    130  b = load4(prev);  in png_read_filter_row_avg3_sse2()
    131  a = d; d = load4(row );  in png_read_filter_row_avg3_sse2()
    183  b = load4(prev);  in png_read_filter_row_avg4_sse2()
    184  a = d; d = load4(row );  in png_read_filter_row_avg4_sse2()
    258  c = b; b = _mm_unpacklo_epi8(load4(prev), zero);  in png_read_filter_row_paeth3_sse2()
    259  a = d; d = _mm_unpacklo_epi8(load4(row ), zero);  in png_read_filter_row_paeth3_sse2()
    357  c = b; b = _mm_unpacklo_epi8(load4(prev), zero);  in png_read_filter_row_paeth3_sse2()
    [all …]
|
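Note: both libpng copies above define the same static helper at line 30. A sketch of what a load4() with this signature typically is, assuming the usual memcpy-based unaligned load (check the file itself for the exact body):

    #include <emmintrin.h>  /* SSE2 */
    #include <string.h>

    /* Sketch of a load4()-style helper: an unaligned 4-byte load into
     * the low 32 bits of an XMM register. memcpy sidesteps alignment
     * and strict-aliasing UB; compilers fold it into a single mov. */
    static __m128i load4(const void* p) {
      int tmp;
      memcpy(&tmp, p, sizeof(tmp));
      return _mm_cvtsi32_si128(tmp);
    }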
/external/libaom/libaom/aom_dsp/mips/ |
D | convolve8_vert_dspr2.c |
     32  uint32_t load1, load2, load3, load4;  in convolve_vert_4_dspr2() local
    155  [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),  in convolve_vert_4_dspr2()
    180  uint32_t load1, load2, load3, load4;  in convolve_vert_64_dspr2() local
    304  [load4] "=&r"(load4), [p1] "=&r"(p1), [p2] "=&r"(p2),  in convolve_vert_64_dspr2()
|
/external/llvm-project/llvm/test/CodeGen/ARM/ |
D | optselect-regclass.ll |
     12  …%bf.load4 = load i32, i32* getelementptr inbounds (%union.opcode.0.2.5.8.15.28, %union.opcode.0.2.…
     13  %bf.clear10 = and i32 %bf.load4, 65535
     14  %and11 = and i32 %bf.load4, 32768
|
/external/llvm/test/CodeGen/ARM/ |
D | optselect-regclass.ll |
     12  …%bf.load4 = load i32, i32* getelementptr inbounds (%union.opcode.0.2.5.8.15.28, %union.opcode.0.2.…
     13  %bf.clear10 = and i32 %bf.load4, 65535
     14  %and11 = and i32 %bf.load4, 32768
|
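Note: %bf.load4 is Clang's name for the load of a bitfield storage unit; the two and masks read a 16-bit field (65535) and a single bit (32768, bit 15) out of the same load. A hypothetical C shape that lowers this way (the layout is an illustrative guess, not reconstructed from the test's union.opcode type):

    /* Hypothetical bitfield layout producing IR like the matches above. */
    union opcode {
      struct {
        unsigned op   : 16;  /* field read lowers to: and %bf.load, 65535 */
        unsigned rest : 16;
      } f;
      unsigned whole;
    };

    unsigned read_op(const union opcode *u) {
      return u->f.op;                    /* load i32 + and %bf.load, 65535 */
    }
    int bit15_set(const union opcode *u) {
      return (u->whole & 32768u) != 0;   /* and %bf.load, 32768 */
    }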
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/AMDGPU/ |
D | horizontal-store.ll |
     46  …%load4 = load i32, i32* getelementptr inbounds ([32 x i32], [32 x i32]* @arr, i64 0, i64 3), align…
     47  %cmp3 = icmp sgt i32 %select2, %load4
     48  %select3 = select i1 %cmp3, i32 %select2, i32 %load4
     93  …%load4 = load i64, i64* getelementptr inbounds ([32 x i64], [32 x i64]* @arr64, i64 0, i64 3), ali…
     94  %cmp3 = icmp slt i64 %select2, %load4
     95  %select3 = select i1 %cmp3, i64 %select2, i64 %load4
    144  …%load4 = load float, float* getelementptr inbounds ([32 x float], [32 x float]* @farr, i64 0, i64 …
    145  %cmp3 = fcmp fast ogt float %select2, %load4
    146  %select3 = select i1 %cmp3, float %select2, float %load4
    195  …%load4 = load double, double* getelementptr inbounds ([32 x double], [32 x double]* @darr, i64 0, …
    [all …]
|
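Note: the icmp/select chains above are unrolled max/min reductions over the leading elements of @arr, @arr64, @farr, and @darr; the test checks that SLP rewrites them as horizontal reductions feeding a store. A hypothetical C analogue of one such chain:

    /* Hypothetical C analogue of the signed-max chain in the i32 case:
     * fully unrolled, each step is one load, one icmp sgt, and one
     * select, matching the %load4/%cmp3/%select3 shape above. */
    int smax_chain(const int *arr) {
      int m = arr[0];
      for (int i = 1; i < 8; ++i)
        m = (m > arr[i]) ? m : arr[i];
      return m;
    }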
/external/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/ |
D | propagate_ir_flags.ll |
     32  %load4 = load i32, i32* %idx4, align 4
     37  %op4 = lshr exact i32 %load4, 1
     68  %load4 = load i32, i32* %idx4, align 4
     73  %op4 = lshr exact i32 %load4, 1
    104  %load4 = load i32, i32* %idx4, align 4
    109  %op4 = add nsw i32 %load4, 1
    140  %load4 = load i32, i32* %idx4, align 4
    145  %op4 = add i32 %load4, 1
    176  %load4 = load i32, i32* %idx4, align 4
    181  %op4 = add nuw i32 %load4, 1
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | promote-constOffset-to-imm.ll |
     54  %load4 = load i64, i64 addrspace(1)* %add.ptr8.3, align 8
     55  %add.3 = add i64 %load4, %add.2
    264  %load4 = load i32, i32 addrspace(1)* %add.ptr8.3, align 4
    265  %add.3 = add i32 %load4, %add.2
    333  %load4 = load i64, i64 addrspace(1)* %addr4, align 8
    334  %add4 = add i64 %load4, %add2
    340  ; TODO: Support load4 as anchor instruction.
    379  %load4 = load i32, i32 addrspace(1)* %addr4, align 8
    380  %add4 = add i32 %load4, %add2
    430  %load4 = load i64, i64 addrspace(1)* %add.ptr8.5, align 8
    [all …]
|
/external/llvm-project/llvm/test/Transforms/NewGVN/ |
D | pr32836.ll |
     41  %bf.load4 = load i32, i32* getelementptr (%struct.anon, %struct.anon* @b, i64 0, i32 0)
     42  %tmp4 = and i32 %bf.load4, 536870911
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | falkor-hwpf-fix.ll |
     34  %load4 = load i32, i32* %gep4
     36  %add2 = add i32 %load3, %load4
|
D | f16-convert.ll |
     51  define float @load4(i16* nocapture readonly %a, i64 %i) nounwind {
     52  ; CHECK-LABEL: load4:
|
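Note: in f16-convert.ll, @load4 loads a half-precision element and widens it to float. A hypothetical C shape for the test function, assuming __fp16, the Arm storage-only half type supported by Clang/GCC on AArch64:

    /* Hypothetical C shape of @load4: load a[i] as half, return float.
     * On AArch64 this should select an indexed ldr of an h register
     * followed by an fcvt to single precision. */
    float load4(const __fp16 *a, long i) {
      return (float)a[i];
    }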
/external/llvm/test/Instrumentation/AddressSanitizer/ |
D | experiment-call.ll |
     25  define void @load4(i32* %p) sanitize_address {
     29  ; CHECK-LABEL: define void @load4
|
D | experiment.ll |
     25  define void @load4(i32* %p) sanitize_address {
     29  ; CHECK-LABEL: define void @load4
|
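Note: the AddressSanitizer tests here (and their /external/llvm-project copies below) each define a @load4 that performs a plain 4-byte load under sanitize_address, then CHECK for the inserted instrumentation. A hypothetical C equivalent of the test function:

    /* Hypothetical C equivalent of @load4: a single 4-byte load in a
     * function built with -fsanitize=address. ASan inserts a shadow
     * check before the load (or a runtime call such as __asan_load4
     * when instrumenting with calls). */
    void load4(int *p) {
      volatile int v = *p;  /* volatile keeps the load alive */
      (void)v;
    }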
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | pr35763.ll |
     38  …%bf.load4 = load i40, i40* bitcast ([5 x i8]* getelementptr inbounds (%struct.S, %struct.S* bitcas…
     39  %bf.clear5 = and i40 %bf.load4, -8589869057
|
D | masked_gather.ll |
     46  ; SSE-NEXT: .LBB0_5: # %cond.load4
     85  ; AVX1-NEXT: .LBB0_5: # %cond.load4
    125  ; AVX2-NEXT: .LBB0_5: # %cond.load4
    190  ; SSE-NEXT: .LBB1_5: # %cond.load4
    239  ; AVX1-NEXT: .LBB1_5: # %cond.load4
    284  ; AVX2-NEXT: .LBB1_5: # %cond.load4
    350  ; SSE-NEXT: .LBB2_5: # %cond.load4
    397  ; AVX1-NEXT: .LBB2_5: # %cond.load4
    441  ; AVX2-NEXT: .LBB2_5: # %cond.load4
    505  ; SSE-NEXT: # %bb.5: # %cond.load4
    [all …]
|
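Note: the cond.load4 labels above are per-element blocks emitted when the backend scalarizes a masked gather, testing and conditionally loading one lane at a time. A hypothetical intrinsics-level version of the operation under test (AVX2; SSE-only targets get the branchy cond.load blocks instead):

    #include <immintrin.h>

    /* Hypothetical AVX2 gather equivalent: for each lane whose mask
     * MSB is set, load base[idx[lane]]; other lanes keep src. The
     * final argument is the per-index scale in bytes. Build -mavx2. */
    __m128i gather4(const int *base, __m128i idx, __m128i mask, __m128i src) {
      return _mm_mask_i32gather_epi32(src, base, idx, mask, 4);
    }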
/external/llvm-project/llvm/test/Instrumentation/AddressSanitizer/ |
D | experiment.ll |
     26  define void @load4(i32* %p) sanitize_address {
     30  ; CHECK-LABEL: define void @load4
|
D | experiment-call.ll |
     26  define void @load4(i32* %p) sanitize_address {
     30  ; CHECK-LABEL: define void @load4
|
/external/llvm-project/llvm/test/Transforms/LoadStoreVectorizer/AMDGPU/ |
D | gep-bitcast.ll |
     29  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
     58  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
     84  %load4 = load i32, i32 addrspace(1)* %f2i4, align 4
|
/external/llvm/test/CodeGen/AArch64/ |
D | f16-convert.ll |
     51  define float @load4(i16* nocapture readonly %a, i64 %i) nounwind {
     52  ; CHECK-LABEL: load4:
|