/external/libvpx/libvpx/vpx_dsp/mips/

D | avg_msa.c
      24  ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6);  in vpx_avg_8x8_msa()
     416  ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,  in vpx_int_pro_row_msa()
     418  ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,  in vpx_int_pro_row_msa()
     420  ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,  in vpx_int_pro_row_msa()
     422  ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,  in vpx_int_pro_row_msa()
     440  ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,  in vpx_int_pro_row_msa()
     442  ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l,  in vpx_int_pro_row_msa()
     444  ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l,  in vpx_int_pro_row_msa()
     446  ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l,  in vpx_int_pro_row_msa()
     458  ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l,  in vpx_int_pro_row_msa()
     [all …]

D | fwd_txfm_msa.c
      19  ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct8x8_1_msa()
      50  ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);  in fdct8x16_1d_column()
      51  ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);  in fdct8x16_1d_column()
     159  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);  in fdct16x8_1d_row()
     160  ADD4(in4, 1, in5, 1, in6, 1, in7, 1, in4, in5, in6, in7);  in fdct16x8_1d_row()
     161  ADD4(in8, 1, in9, 1, in10, 1, in11, 1, in8, in9, in10, in11);  in fdct16x8_1d_row()
     162  ADD4(in12, 1, in13, 1, in14, 1, in15, 1, in12, in13, in14, in15);  in fdct16x8_1d_row()
     208  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);  in vpx_fdct4x4_msa()
     263  ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);  in vpx_fdct16x16_1_msa()

D | idct32x32_msa.c
     200  ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3);  in idct32x8_row_odd_process_store()
     214  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct32x8_row_odd_process_store()
     228  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct32x8_row_odd_process_store()
     255  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);  in idct_butterfly_transpose_store()
     272  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);  in idct_butterfly_transpose_store()
     289  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);  in idct_butterfly_transpose_store()
     306  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7);  in idct_butterfly_transpose_store()
     502  ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3);  in idct8x32_column_odd_process_store()
     513  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct8x32_column_odd_process_store()
     527  ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3);  in idct8x32_column_odd_process_store()
     [all …]

D | fwd_dct32x32_msa.c
      75  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);  in fdct8x32_1d_column_even_store()
     103  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_column_even_store()
     181  ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31);  in fdct8x32_1d_column_odd_store()
     184  ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25);  in fdct8x32_1d_column_odd_store()
     231  ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19);  in fdct8x32_1d_column_odd_store()
     315  ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, tmp0_w,  in fdct8x32_1d_row_even_4x()
     318  ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, vec0_r,  in fdct8x32_1d_row_even_4x()
     362  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_row_even_4x()
     406  ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3);  in fdct8x32_1d_row_even()
     434  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_row_even()
     [all …]

D | inv_txfm_msa.h
     104  ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, res0_m, res1_m, \
     271  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \
     289  ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \
     307  ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m, \

D | idct16x16_msa.c
      36  ADD4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg2, reg14, reg6,  in vpx_idct16_1d_rows_msa()
     281  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);  in vpx_idct16x16_1_add_msa()
     282  ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7);  in vpx_idct16x16_1_add_msa()

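All of the MSA call sites above pull `ADD4` from the helper-macro family in libvpx's vpx_dsp/mips/macros_msa.h (libwebp and libpng, further down, carry their own copies). As a hedged sketch of its usual shape, not a verbatim copy of any one tree: the first eight arguments form four input pairs, the last four name the outputs, and the macro is just two `ADD2` expansions.

    /* Sketch of the ADD2/ADD4 helper shape; verify against the exact
     * tree before relying on it. The macros are type-agnostic and work
     * for any scalar or GCC-style vector type that supports "+". */
    #define ADD2(in0, in1, in2, in3, out0, out1) \
      {                                          \
        out0 = in0 + in1;                        \
        out1 = in2 + in3;                        \
      }
    #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \
                 out0, out1, out2, out3)                 \
      {                                                  \
        ADD2(in0, in1, in2, in3, out0, out1);            \
        ADD2(in4, in5, in6, in7, out2, out3);            \
      }

Under that reading, the idct16x16_msa.c call at line 281 above, ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3), adds the DC vector vec to each of the four result vectors in place.
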
/external/libaom/libaom/av1/encoder/mips/msa/

D | temporal_filter_msa.c
      87  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
     133  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
     211  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
     258  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()

/external/libvpx/libvpx/vp8/encoder/mips/msa/

D | temporal_filter_msa.c
      74  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
     113  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_16size_msa()
     195  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()
     235  ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w,  in temporal_filter_apply_8size_msa()

D | dct_msa.c
     192  ADD4(in0_w, 3, in1_w, 3, in2_w, 3, in3_w, 3, in0_w, in1_w, in2_w, in3_w);  in vp8_short_walsh4x4_msa()

/external/libvpx/libvpx/vp8/common/mips/msa/

D | idct_msa.c
     108  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in idct4x4_addblk_msa()
     130  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);  in idct4x4_addconst_msa()
     204  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in dequant_idct4x4_addblk_msa()
     246  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in dequant_idct4x4_addblk_2x_msa()
     287  ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3);  in dequant_idct_addconst_2x_msa()

/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/

D | convert-rr-to-ri-instrs-R0-special-handling.mir
     127  %4:gprc = ADD4 killed $r0, killed %2
     181  %4:gprc = ADD4 killed %3, killed $r0
     235  %4:gprc = ADD4 killed $r0, killed %2
     288  %4:gprc = ADD4 killed %2, killed $r0

D | convert-rr-to-ri-instrs.mir
    1052  %4 = ADD4 killed %3, %2
    1053  %5 = ADD4 killed %2, killed %4
    1829  %7 = ADD4 killed %6, %2
    1888  %7 = ADD4 killed %6, killed %2
    1950  %7 = ADD4 killed %6, %2
    2013  %7 = ADD4 killed %6, killed %2
    2091  %13 = ADD4 killed %12, killed %7
    2170  %13 = ADD4 killed %12, killed %7
    2248  %13 = ADD4 killed %12, killed %7
    2325  %13 = ADD4 killed %12, killed %7
    [all …]

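Both .mir files above exercise LLVM's PPC transform that turns a register-register ADD4 into the register-immediate ADDI when one source is produced by a load-immediate. The R0-special-handling variant exists because in "addi RT, RA, SI" an RA field of r0 reads as the literal 0, not the register contents, so some of the "killed $r0" operands above must block the rewrite. What follows is a hypothetical C++ sketch of the core check only, with MI, DefMI, Imm, and TII assumed in scope; it is not the actual PPCInstrInfo code:

    // Sketch: fold "LI Imm" feeding an ADD4 into a single ADDI, but only
    // when the surviving register operand will not land in RA as r0
    // (there it would read as constant 0 and change the result).
    if (DefMI->getOpcode() == PPC::LI && isInt<16>(Imm) &&
        MI.getOperand(1).getReg() != PPC::R0) {
      MI.setDesc(TII->get(PPC::ADDI));          // ADD4 -> ADDI
      MI.getOperand(2).ChangeToImmediate(Imm);  // register use -> SI field
    }
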
/external/webp/src/dsp/

D | enc_msa.c
      65  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in ITransformOne()
     425  ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3);  in TM4()
     501  ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3);  in TrueMotion16x16()
     502  ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7);  in TrueMotion16x16()
     626  ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3);  in TrueMotion8x8()
     828  ADD4(b0, t0, b1, t1, b2, t2, b3, t3, b0, b1, b2, b3);  in QuantizeBlock_MSA()

D | dec_msa.c
      63  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);  in TransformOne()
     141  ADD4(out0, tmp0, out1, tmp0, out2, tmp0, out3, tmp0,  in TransformAC3()
     149  ADD4(res0, out0, res1, out1, res2, out2, res3, out3, res0, res1, res2, res3);  in TransformAC3()
     702  ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3);  in TM4()
     810  ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3);  in TM16()
     811  ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7);  in TM16()
     909  ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3);  in TM8uv()

/external/libvpx/libvpx/vp9/encoder/mips/msa/

D | vp9_fdct4x4_msa.c
      93  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);  in vp9_fht4x4_msa()

/external/libpng/mips/

D | filter_msa_intrinsics.c
     291  #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \   macro
     381  ADD4(src0, src4, src1, src5, src2, src6, src3, src7,  in png_read_filter_row_up_msa()
     401  ADD4(src0, src4, src1, src5, src2, src6, src3, src7,  in png_read_filter_row_up_msa()

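filter_msa_intrinsics.c is the only file in this list that defines the macro rather than merely using it (line 291 is tagged "macro" by the indexer): libpng keeps a private copy so its MSA code has no libvpx dependency. The two call sites implement the PNG "up" filter, adding the previous row to the current one four vector registers at a time. A hedged sketch of that step, with the loads/stores elided and the v16u8 vector typedef assumed:

    /* Sketch: 64 bytes of the current row (src0..src3) plus the same
     * 64 bytes of the row above (src4..src7), lane-wise, in one macro. */
    v16u8 src0, src1, src2, src3;  /* current row chunk */
    v16u8 src4, src5, src6, src7;  /* previous row chunk */
    ADD4(src0, src4, src1, src5, src2, src6, src3, src7,
         src0, src1, src2, src3);  /* row += prev_row */
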
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/AMDGPU/

D | reduction.ll
      64  ; VI-NEXT: [[ADD4:%.*]] = fadd fast half [[ELT4]], [[ADD3]]
      65  ; VI-NEXT: [[ADD5:%.*]] = fadd fast half [[ELT5]], [[ADD4]]
     126  ; VI-NEXT: [[ADD4:%.*]] = fadd fast half [[ELT4]], [[ADD3]]
     127  ; VI-NEXT: [[ADD5:%.*]] = fadd fast half [[ELT5]], [[ADD4]]
     262  ; VI-NEXT: [[ADD4:%.*]] = add i16 [[ELT4]], [[ADD3]]
     263  ; VI-NEXT: [[ADD5:%.*]] = add i16 [[ELT5]], [[ADD4]]

/external/swiftshader/third_party/LLVM/lib/Target/PowerPC/

D | PPCRegisterInfo.cpp
      83  ImmToIdxMap[PPC::ADDI] = PPC::ADD4;  in PPCRegisterInfo()
     291  unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;  in eliminateCallFramePseudoInstr()

D | PPCFrameLowering.cpp
     593  BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD4))  in emitEpilogue()
     666  unsigned ADDInstr = isPPC64 ? PPC::ADD8 : PPC::ADD4;  in emitEpilogue()

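On the LLVM side, PPC::ADD4 is the 32-bit reg+reg "add" opcode: PPCRegisterInfo.cpp registers it in ImmToIdxMap as the fallback for ADDI when a frame offset no longer fits the 16-bit immediate, and both files select ADD8 vs ADD4 by pointer width. A minimal sketch of the BuildMI pattern behind the emitEpilogue hit at line 593, assuming MBB, MBBI, dl, and TII are in scope; the register roles are illustrative, not taken from the source:

    // Sketch: restore SP as r1 = r31 + r0 (frame pointer plus a frame
    // size too large for an immediate, materialized into r0 earlier).
    BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD4), PPC::R1)
        .addReg(PPC::R31)
        .addReg(PPC::R0);
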
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/IndVarSimplify/

D | iv-widen-elim-ext.ll
      26  ; CHECK-NEXT: [[ADD4:%.*]] = add nsw i32 [[ADD3]], [[DIV0]]
      28  ; CHECK-NEXT: store i32 [[ADD4]], i32* [[ARRAYIDX5]], align 4

/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/

D | horizontal-list.ll
    1356  ; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
    1357  ; CHECK-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4]], [[CONV]]
    1390  ; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
    1391  ; THRESHOLD-NEXT: [[ADD5:%.*]] = fadd fast float [[ADD4]], [[CONV]]
    1456  ; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
    1457  ; CHECK-NEXT: [[ADD41:%.*]] = fadd fast float [[ADD4]], 5.000000e+00
    1494  ; THRESHOLD-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
    1495  ; THRESHOLD-NEXT: [[ADD41:%.*]] = fadd fast float [[ADD4]], 5.000000e+00
    1568  ; CHECK-NEXT: [[ADD4:%.*]] = fadd fast float undef, [[ADD1]]
    1569  ; CHECK-NEXT: [[ADD4_1:%.*]] = fadd fast float undef, [[ADD4]]
    [all …]

/external/llvm/lib/Target/PowerPC/

D | PPCAsmPrinter.cpp
     559  TmpInst.setOpcode(PPC::ADD4);  in EmitInstruction()
     811  EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::ADD4)  in EmitInstruction()

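In the asm printer the same opcode appears at the MC layer instead: one site retargets an existing instruction with TmpInst.setOpcode(PPC::ADD4), the other assembles a fresh MCInst through MCInstBuilder. A hedged sketch of the second pattern, assuming a PPCAsmPrinter context where EmitToStreamer and OutStreamer are available; the operands are placeholders, not the ones in the source:

    // Sketch: emit "add r3, r3, r4" directly to the streamer.
    EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::ADD4)
                                     .addReg(PPC::R3)   // RT
                                     .addReg(PPC::R3)   // RA
                                     .addReg(PPC::R4)); // RB
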
D | PPCFrameLowering.cpp
    1145  : PPC::ADD4 );  in emitEpilogue()
    1827  unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;  in eliminateCallFramePseudoInstr()

/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/

D | PPCAsmPrinter.cpp
     642  TmpInst.setOpcode(PPC::ADD4);  in EmitInstruction()
     900  EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::ADD4)  in EmitInstruction()

D | PPCFrameLowering.cpp
    1275  : PPC::ADD4 );  in emitEpilogue()
    2075  unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;  in eliminateCallFramePseudoInstr()