Searched refs:ADD4 (Results 1 – 25 of 32) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
fwd_txfm_msa.c
35 ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3); in fdct8x16_1d_column()
36 ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7); in fdct8x16_1d_column()
144 ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3); in fdct16x8_1d_row()
145 ADD4(in4, 1, in5, 1, in6, 1, in7, 1, in4, in5, in6, in7); in fdct16x8_1d_row()
146 ADD4(in8, 1, in9, 1, in10, 1, in11, 1, in8, in9, in10, in11); in fdct16x8_1d_row()
147 ADD4(in12, 1, in13, 1, in14, 1, in15, 1, in12, in13, in14, in15); in fdct16x8_1d_row()
193 ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3); in vpx_fdct4x4_msa()
223 ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6); in vpx_fdct8x8_1_msa()
261 ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6); in vpx_fdct16x16_1_msa()
avg_msa.c
24 ADD4(sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7, sum0, sum2, sum4, sum6); in vpx_avg_8x8_msa()
414 ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l, in vpx_int_pro_row_msa()
416 ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l, in vpx_int_pro_row_msa()
418 ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l, in vpx_int_pro_row_msa()
420 ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l, in vpx_int_pro_row_msa()
438 ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l, in vpx_int_pro_row_msa()
440 ADD4(hbuf_r, ref2_r, hbuf_l, ref2_l, hbuf_r, ref3_r, hbuf_l, ref3_l, in vpx_int_pro_row_msa()
442 ADD4(hbuf_r, ref4_r, hbuf_l, ref4_l, hbuf_r, ref5_r, hbuf_l, ref5_l, in vpx_int_pro_row_msa()
444 ADD4(hbuf_r, ref6_r, hbuf_l, ref6_l, hbuf_r, ref7_r, hbuf_l, ref7_l, in vpx_int_pro_row_msa()
456 ADD4(hbuf_r, ref0_r, hbuf_l, ref0_l, hbuf_r, ref1_r, hbuf_l, ref1_l, in vpx_int_pro_row_msa()
[all …]
idct32x32_msa.c
200 ADD4(reg1, reg2, reg6, reg5, reg0, reg3, reg7, reg4, vec1, vec2, vec0, vec3); in idct32x8_row_odd_process_store()
214 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3); in idct32x8_row_odd_process_store()
228 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3); in idct32x8_row_odd_process_store()
255 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6); in idct_butterfly_transpose_store()
272 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7); in idct_butterfly_transpose_store()
289 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6); in idct_butterfly_transpose_store()
306 ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n1, n5, n3, n7); in idct_butterfly_transpose_store()
502 ADD4(reg0, reg3, reg1, reg2, reg5, reg6, reg4, reg7, vec0, vec1, vec2, vec3); in idct8x32_column_odd_process_store()
513 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3); in idct8x32_column_odd_process_store()
527 ADD4(reg0, reg4, reg1, reg5, reg2, reg6, reg3, reg7, loc0, loc1, loc2, loc3); in idct8x32_column_odd_process_store()
[all …]
fwd_dct32x32_msa.c
75 ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3); in fdct8x32_1d_column_even_store()
103 ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2); in fdct8x32_1d_column_even_store()
181 ADD4(in16, in23, in17, in22, in30, in25, in31, in24, in16, in17, in30, in31); in fdct8x32_1d_column_odd_store()
184 ADD4(in16, in19, in17, in18, in30, in29, in31, in28, in27, in22, in21, in25); in fdct8x32_1d_column_odd_store()
231 ADD4(in22, in21, in23, in20, in24, in27, in25, in26, in16, in29, in30, in19); in fdct8x32_1d_column_odd_store()
315 ADD4(vec0_r, vec7_r, vec1_r, vec6_r, vec2_r, vec5_r, vec3_r, vec4_r, tmp0_w, in fdct8x32_1d_row_even_4x()
318 ADD4(vec0_l, vec7_l, vec1_l, vec6_l, vec2_l, vec5_l, vec3_l, vec4_l, vec0_r, in fdct8x32_1d_row_even_4x()
362 ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2); in fdct8x32_1d_row_even_4x()
406 ADD4(vec0, vec7, vec1, vec6, vec2, vec5, vec3, vec4, in0, in1, in2, in3); in fdct8x32_1d_row_even()
434 ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2); in fdct8x32_1d_row_even()
[all …]
inv_txfm_msa.h
104 ADD4(res0_m, in0, res1_m, in1, res2_m, in2, res3_m, in3, res0_m, res1_m, \
271 ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \
289 ADD4(r0_m, r4_m, r1_m, r5_m, r2_m, r6_m, r3_m, r7_m, m0_m, m1_m, m2_m, \
307 ADD4(r0_m, r6_m, r1_m, r7_m, r2_m, r4_m, r3_m, r5_m, m0_m, m1_m, m2_m, \
idct16x16_msa.c
36 ADD4(reg2, loc1, reg14, loc0, reg6, loc3, reg10, loc2, reg2, reg14, reg6, in vpx_idct16_1d_rows_msa()
281 ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); in vpx_idct16x16_1_add_msa()
282 ADD4(res4, vec, res5, vec, res6, vec, res7, vec, res4, res5, res6, res7); in vpx_idct16x16_1_add_msa()
macros_msa.h
1683 #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ macro
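Note: the macros_msa.h hit at line 1683 above is the definition of the ADD4 macro itself; the listing only shows its #define line. Judging from the call sites above (e.g. ADD4(in0, in15, in1, in14, ..., tmp0, tmp1, tmp2, tmp3) in fwd_txfm_msa.c, which sums adjacent argument pairs into tmp0..tmp3), the macro performs four independent pairwise additions. The following is a minimal self-contained sketch of that behaviour over plain integers; it is not the upstream macro body, which operates on MSA vector operands and may be structured differently.

#include <cstdio>

// Sketch only: pairwise-add semantics inferred from the ADD4 call sites
// listed above.  ADD2 here is an illustration helper, not necessarily how
// the upstream macros_msa.h builds ADD4.
#define ADD2(in0, in1, in2, in3, out0, out1) \
  {                                          \
    (out0) = (in0) + (in1);                  \
    (out1) = (in2) + (in3);                  \
  }

#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \
  {                                                                          \
    ADD2(in0, in1, in2, in3, out0, out1);                                    \
    ADD2(in4, in5, in6, in7, out2, out3);                                    \
  }

int main() {
  int t0, t1, t2, t3;
  ADD4(1, 2, 3, 4, 5, 6, 7, 8, t0, t1, t2, t3);
  std::printf("%d %d %d %d\n", t0, t1, t2, t3);  // prints: 3 7 11 15
  return 0;
}
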
/external/libvpx/libvpx/vp8/encoder/mips/msa/
temporal_filter_msa.c
74 ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, in temporal_filter_apply_16size_msa()
113 ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, in temporal_filter_apply_16size_msa()
195 ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, in temporal_filter_apply_8size_msa()
235 ADD4(mod0_w, acc0, mod1_w, acc1, mod2_w, acc2, mod3_w, acc3, mod0_w, mod1_w, in temporal_filter_apply_8size_msa()
dct_msa.c
192 ADD4(in0_w, 3, in1_w, 3, in2_w, 3, in3_w, 3, in0_w, in1_w, in2_w, in3_w); in vp8_short_walsh4x4_msa()
/external/libvpx/libvpx/vp8/common/mips/msa/
idct_msa.c
108 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in idct4x4_addblk_msa()
130 ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); in idct4x4_addconst_msa()
204 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in dequant_idct4x4_addblk_msa()
246 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in dequant_idct4x4_addblk_2x_msa()
287 ADD4(res0, vec, res1, vec, res2, vec, res3, vec, res0, res1, res2, res3); in dequant_idct_addconst_2x_msa()
vp8_macros_msa.h
1456 #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, out0, out1, out2, out3) \ macro
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct4x4_msa.c
93 ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3); in vp9_fht4x4_msa()
/external/webp/src/dsp/
enc_msa.c
65 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in ITransformOne()
421 ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3); in TM4()
497 ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3); in TrueMotion16x16()
498 ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7); in TrueMotion16x16()
622 ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3); in TrueMotion8x8()
824 ADD4(b0, t0, b1, t1, b2, t2, b3, t3, b0, b1, b2, b3); in QuantizeBlock()
dec_msa.c
63 ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3); in TransformOne()
141 ADD4(out0, tmp0, out1, tmp0, out2, tmp0, out3, tmp0, in TransformAC3()
149 ADD4(res0, out0, res1, out1, res2, out2, res3, out3, res0, res1, res2, res3); in TransformAC3()
701 ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3); in TM4()
809 ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3); in TM16()
810 ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7); in TM16()
908 ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3); in TM8uv()
msa_macro.h
1172 #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \ macro
/external/libpng/mips/
filter_msa_intrinsics.c
291 #define ADD4(in0, in1, in2, in3, in4, in5, in6, in7, \ macro
381 ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
401 ADD4(src0, src4, src1, src5, src2, src6, src3, src7, in png_read_filter_row_up_msa()
/external/swiftshader/third_party/LLVM/lib/Target/PowerPC/
PPCRegisterInfo.cpp
83 ImmToIdxMap[PPC::ADDI] = PPC::ADD4; in PPCRegisterInfo()
291 unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4; in eliminateCallFramePseudoInstr()
PPCFrameLowering.cpp
593 BuildMI(MBB, MBBI, dl, TII.get(PPC::ADD4)) in emitEpilogue()
666 unsigned ADDInstr = isPPC64 ? PPC::ADD8 : PPC::ADD4; in emitEpilogue()
PPCISelLowering.cpp
4790 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) in EmitPartwordAtomicBinary()
4911 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); in EmitInstrWithCustomInserter()
4913 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); in EmitInstrWithCustomInserter()
4915 BB = EmitAtomicBinary(MI, BB, false, PPC::ADD4); in EmitInstrWithCustomInserter()
5122 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) in EmitInstrWithCustomInserter()
/external/llvm/lib/Target/PowerPC/
PPCAsmPrinter.cpp
559 TmpInst.setOpcode(PPC::ADD4); in EmitInstruction()
811 EmitToStreamer(*OutStreamer, MCInstBuilder(PPC::ADD4) in EmitInstruction()
PPCFrameLowering.cpp
1145 : PPC::ADD4 ); in emitEpilogue()
1827 unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4; in eliminateCallFramePseudoInstr()
PPCFastISel.cpp
1193 Opc = IsGPRC ? PPC::ADD4 : PPC::ADD8; in SelectBinaryIntOp()
1216 case PPC::ADD4: in SelectBinaryIntOp()
PPCRegisterInfo.cpp
72 ImmToIdxMap[PPC::ADDI] = PPC::ADD4; in PPCRegisterInfo()
PPCISelLowering.cpp
8581 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) in EmitPartwordAtomicBinary()
9070 BB = EmitPartwordAtomicBinary(MI, BB, true, PPC::ADD4); in EmitInstrWithCustomInserter()
9072 BB = EmitPartwordAtomicBinary(MI, BB, false, PPC::ADD4); in EmitInstrWithCustomInserter()
9074 BB = EmitAtomicBinary(MI, BB, 4, PPC::ADD4); in EmitInstrWithCustomInserter()
9306 BuildMI(BB, dl, TII->get(is64bit ? PPC::ADD8 : PPC::ADD4), Ptr1Reg) in EmitInstrWithCustomInserter()
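Note: in the two PowerPC groups above (swiftshader's bundled LLVM and external/llvm), ADD4 is not the MSA macro but the backend's opcode for the 32-bit integer add instruction, and the hits consistently pair it with PPC::ADD8, its 64-bit counterpart (e.g. unsigned ADDInstr = is64Bit ? PPC::ADD8 : PPC::ADD4;). As a hedged sketch of that opcode-selection pattern, a hypothetical helper inside the PowerPC backend might look like the following; emitPointerSizedAdd is not a function from the LLVM tree, and the include set is an assumption.

// Sketch only, not code from the LLVM tree.  Assumes it lives under
// llvm/lib/Target/PowerPC/ so the backend-private header is on the
// include path; the exact include set may differ between LLVM versions.
#include "PPCInstrInfo.h"                       // PPC::ADD4 / PPC::ADD8, PPCInstrInfo
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"   // BuildMI

using namespace llvm;

// Hypothetical helper: emit a pointer-sized register-register add, choosing
// the 32-bit ADD4 or 64-bit ADD8 opcode the same way the hits above do.
static void emitPointerSizedAdd(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator InsertPt,
                                const DebugLoc &DL, const PPCInstrInfo &TII,
                                bool Is64Bit, unsigned DstReg, unsigned Src1Reg,
                                unsigned Src2Reg) {
  unsigned Opc = Is64Bit ? PPC::ADD8 : PPC::ADD4;
  BuildMI(MBB, InsertPt, DL, TII.get(Opc), DstReg)
      .addReg(Src1Reg)
      .addReg(Src2Reg);
}
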
/external/boringssl/mac-x86_64/crypto/fipsmodule/
bsaes-x86_64.S
2476 L$ADD4: