/external/libxaac/decoder/ |
D | ixheaacd_qmf_dec.c |
    361  VOID ixheaacd_sbr_qmfanal32_winadd_eld(WORD16 *inp1, WORD16 *inp2,  in ixheaacd_sbr_qmfanal32_winadd_eld() argument
    390  accu = ixheaacd_mult16x16in32(inp2[n + 0], p_qmf2[(n + 0)]);  in ixheaacd_sbr_qmfanal32_winadd_eld()
    392  accu, ixheaacd_mult16x16in32(inp2[n + 64], p_qmf2[(n + 64)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    394  accu, ixheaacd_mult16x16in32(inp2[n + 128], p_qmf2[(n + 128)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    396  accu, ixheaacd_mult16x16in32(inp2[n + 192], p_qmf2[(n + 192)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    398  accu, ixheaacd_mult16x16in32(inp2[n + 256], p_qmf2[(n + 256)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    401  accu = ixheaacd_mult16x16in32(inp2[n + 1 + 0], p_qmf2[(n + 1 + 0)]);  in ixheaacd_sbr_qmfanal32_winadd_eld()
    403  accu, ixheaacd_mult16x16in32(inp2[n + 1 + 64], p_qmf2[(n + 1 + 64)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    405  accu, ixheaacd_mult16x16in32(inp2[n + 1 + 128], p_qmf2[(n + 1 + 128)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    407  accu, ixheaacd_mult16x16in32(inp2[n + 1 + 192], p_qmf2[(n + 1 + 192)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    [all …]
|
D | ixheaacd_qmf_dec.h |
    86   VOID ixheaacd_esbr_qmfanal32_winadd(WORD32 *inp1, WORD32 *inp2,
    201  VOID ixheaacd_sbr_qmfanal32_winadd_eld(WORD16 *inp1, WORD16 *inp2,
|
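For orientation, the ixheaacd_sbr_qmfanal32_winadd* excerpts above all follow the same pattern: each output sample is a five-tap windowed sum over input samples spaced 64 apart, accumulated in 32-bit fixed point. The following is a minimal plain-C sketch of that accumulation only; it uses ordinary integer arithmetic instead of the library's saturating ixheaacd_mult16x16in32/ixheaacd_add32 helpers, and the window-coefficient indexing (stride 1 vs. stride 2) differs between the ELD and generic variants shown in this listing.

    #include <stdint.h>

    /* Simplified sketch of the QMF analysis window-add: a 5-tap sum over
     * 16-bit input samples spaced 64 apart, weighted by 16-bit window
     * coefficients and accumulated in 32 bits.  The real code wraps each
     * multiply/add in saturating fixed-point helpers. */
    static int32_t qmf_winadd_tap5(const int16_t *inp, const int16_t *win, int n) {
      int32_t accu = 0;
      for (int k = 0; k < 5; k++) {
        accu += (int32_t)inp[n + 64 * k] * (int32_t)win[n + 64 * k];
      }
      return accu;
    }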
/external/llvm/test/CodeGen/PowerPC/ |
D | qpx-split-vsetcc.ll |
    6   define void @gsl_sf_legendre_Pl_deriv_array(<4 x i32> %inp1, <4 x double> %inp2) #0 {
    13  %2 = fmul <4 x double> %inp2, %1
    14  %3 = fmul <4 x double> %inp2, %2
    15  %4 = fmul <4 x double> %3, %inp2
|
D | lsr-postinc-pos.ll |
    8   ; CHECK: %c1 = icmp ult i8* %scevgep, %inp2
    13  define void @foo(i8** %inp, i8* %inp2) nounwind {
    26  %c1 = icmp ult i8* %p, %inp2 ; <i1> [#uses=1]
|
D | subreg-postra.ll |
    6    define void @jbd2_journal_commit_transaction(i32* %journal, i64 %inp1, i32 %inp2,
    139  %.err.4 = select i1 %lnot404, i32 -5, i32 %inp2
    154  %err.4.lcssa = phi i32 [ %inp2, %do.body378 ], [ %.err.4, %wait_on_buffer.exit1319 ]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | qpx-split-vsetcc.ll |
    6   define void @gsl_sf_legendre_Pl_deriv_array(<4 x i32> %inp1, <4 x double> %inp2) #0 {
    13  %2 = fmul <4 x double> %inp2, %1
    14  %3 = fmul <4 x double> %inp2, %2
    15  %4 = fmul <4 x double> %3, %inp2
|
D | lsr-postinc-pos.ll |
    8   ; CHECK: %c1 = icmp ult i8* %scevgep, %inp2
    13  define void @foo(i8** %inp, i8* %inp2) nounwind {
    26  %c1 = icmp ult i8* %p, %inp2 ; <i1> [#uses=1]
|
D | subreg-postra.ll |
    7    define void @jbd2_journal_commit_transaction(i32* %journal, i64 %inp1, i32 %inp2,
    140  %.err.4 = select i1 %lnot404, i32 -5, i32 %inp2
    160  %err.4.lcssa = phi i32 [ %inp2, %do.body378 ], [ %.err.4, %wait_on_buffer.exit1319 ]
|
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_matmul_op.cc |
    321  const bfloat16** inp2,  in ScalarMulAdd3Way() argument
    324  float inp2_f = ConvertBfloat16ToFloat(*inp2);  in ScalarMulAdd3Way()
    329  ++*inp2;  in ScalarMulAdd3Way()
    335  const float** inp2, const float** inp3,  in ScalarMulAdd3Way() argument
    337  **out += a1 * **inp1 + a2 * **inp2 + a3 * **inp3;  in ScalarMulAdd3Way()
    340  ++*inp2;  in ScalarMulAdd3Way()
    430  auto inp2 = reinterpret_cast<const float*>(*binp2);  in MulAdd3Way() local
    438  const auto b2 = LOAD(inp2);  in MulAdd3Way()
    463  auto inp2 = reinterpret_cast<const float*>(*binp2);  in TwoMulAdd3Way() local
    468  const auto b2 = LOAD(inp2);  in TwoMulAdd3Way()
    [all …]
|
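The scalar fallback visible at line 337 of the sparse_matmul_op.cc excerpt is a three-way multiply-add over advancing pointers. A minimal C sketch of that pattern is below; the function name and the pointer-to-pointer calling convention are illustrative only, not TensorFlow's actual internal API.

    /* Three-way multiply-add over advancing pointers, mirroring the pattern in
     * the ScalarMulAdd3Way excerpt: *out += a1*x1 + a2*x2 + a3*x3, then every
     * pointer steps forward by one element. */
    static void scalar_mul_add_3way(float a1, float a2, float a3,
                                    const float **inp1, const float **inp2,
                                    const float **inp3, float **out) {
      **out += a1 * **inp1 + a2 * **inp2 + a3 * **inp3;
      ++*inp1;
      ++*inp2;
      ++*inp3;
      ++*out;
    }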
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | identity_n_op_py_test.py |
    46  inp2 = constant_op.constant(
    49  value2] = sess.run(array_ops.identity_n([inp0, inp1, inp2]))
|
D | rnn_cell_test.py |
    1796  for inp1, inp2 in zip(flat_input1, flat_input2):
    1800  self.assertEqual(input_shape, inp2.get_shape().as_list())
|
/external/libaom/libaom/aom_dsp/mips/ |
D | intrapred_msa.c |
    106  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_16x16_msa() local
    112  inp2 = src[2];  in intra_predict_horiz_16x16_msa()
    118  src2 = (v16u8)__msa_fill_b(inp2);  in intra_predict_horiz_16x16_msa()
    129  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_32x32_msa() local
    135  inp2 = src[2];  in intra_predict_horiz_32x32_msa()
    141  src2 = (v16u8)__msa_fill_b(inp2);  in intra_predict_horiz_32x32_msa()
|
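The intra_predict_horiz_16x16_msa/_32x32_msa excerpts above read one left-neighbour pixel per row (inp0..inp3), broadcast it across a vector with __msa_fill_b, and store it as the whole row. A scalar reference sketch of horizontal intra prediction follows; it is an illustration of the operation, not the MSA code itself, and the function name is hypothetical.

    #include <stdint.h>
    #include <string.h>

    /* Scalar reference for horizontal intra prediction: every pixel in row y of
     * the bs x bs block is a copy of the left-neighbour sample left[y].  The MSA
     * version vectorises the per-row fill with __msa_fill_b. */
    static void intra_predict_horiz_c(const uint8_t *left, uint8_t *dst,
                                      int stride, int bs) {
      for (int y = 0; y < bs; ++y) {
        memset(dst, left[y], (size_t)bs);
        dst += stride;
      }
    }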
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | txfm_macros_msa.h |
    82  #define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \  argument
    89  ILVRL_H2_SH(inp3, inp2, madd_s2_m, madd_s3_m); \
|
D | intrapred_msa.c |
    104  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_16x16_msa() local
    110  inp2 = src[2];  in intra_predict_horiz_16x16_msa()
    116  src2 = (v16u8)__msa_fill_b(inp2);  in intra_predict_horiz_16x16_msa()
    127  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_32x32_msa() local
    133  inp2 = src[2];  in intra_predict_horiz_32x32_msa()
    139  src2 = (v16u8)__msa_fill_b(inp2);  in intra_predict_horiz_32x32_msa()
|
D | inv_txfm_msa.h |
    195  #define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \  argument
    202  ILVRL_H2_SH(inp3, inp2, madd_s3_m, madd_s2_m); \
|
/external/libxaac/decoder/generic/ |
D | ixheaacd_qmf_dec_generic.c |
    506  VOID ixheaacd_sbr_qmfanal32_winadd(WORD16 *inp1, WORD16 *inp2, WORD16 *p_qmf1,  in ixheaacd_sbr_qmfanal32_winadd() argument
    539  accu = ixheaacd_mult16x16in32(inp2[n + 0], p_qmf2[2 * (n + 0)]);  in ixheaacd_sbr_qmfanal32_winadd()
    541  accu, ixheaacd_mult16x16in32(inp2[n + 64], p_qmf2[2 * (n + 64)]));  in ixheaacd_sbr_qmfanal32_winadd()
    543  accu, ixheaacd_mult16x16in32(inp2[n + 128], p_qmf2[2 * (n + 128)]));  in ixheaacd_sbr_qmfanal32_winadd()
    545  accu, ixheaacd_mult16x16in32(inp2[n + 192], p_qmf2[2 * (n + 192)]));  in ixheaacd_sbr_qmfanal32_winadd()
    547  accu, ixheaacd_mult16x16in32(inp2[n + 256], p_qmf2[2 * (n + 256)]));  in ixheaacd_sbr_qmfanal32_winadd()
    550  accu = ixheaacd_mult16x16in32(inp2[n + 1 + 0], p_qmf2[2 * (n + 1 + 0)]);  in ixheaacd_sbr_qmfanal32_winadd()
    553  ixheaacd_mult16x16in32(inp2[n + 1 + 64], p_qmf2[2 * (n + 1 + 64)]));  in ixheaacd_sbr_qmfanal32_winadd()
    556  ixheaacd_mult16x16in32(inp2[n + 1 + 128], p_qmf2[2 * (n + 1 + 128)]));  in ixheaacd_sbr_qmfanal32_winadd()
    559  ixheaacd_mult16x16in32(inp2[n + 1 + 192], p_qmf2[2 * (n + 1 + 192)]));  in ixheaacd_sbr_qmfanal32_winadd()
    [all …]
|
/external/tensorflow/tensorflow/python/keras/ |
D | callbacks_v1_test.py |
    188  inp2 = keras.Input((INPUT_DIM,))
    189  inp = keras.layers.add([inp1, inp2])
    194  model = keras.models.Model([inp1, inp2], [output1, output2])
|
/external/libyuv/files/source/ |
D | row_msa.cc |
    2018  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RGB24ToUVRow_MSA() local
    2034  inp2 = (v16u8)__msa_ld_b((v16i8*)s, 32);  in RGB24ToUVRow_MSA()
    2040  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RGB24ToUVRow_MSA()
    2042  src3 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp2, 4);  in RGB24ToUVRow_MSA()
    2122  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RAWToUVRow_MSA() local
    2139  inp2 = (v16u8)__msa_ld_b((v16i8*)s, 32);  in RAWToUVRow_MSA()
    2145  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RAWToUVRow_MSA()
    2147  src3 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp2, 4);  in RAWToUVRow_MSA()
|
/external/libpng/mips/ |
D | filter_msa_intrinsics.c |
    352  #define CMP_AND_SELECT(inp0, inp1, inp2, inp3, inp4, inp5, out0) \  argument
    360  _sel_h1 = (v8i16) __msa_clt_u_h((v8u16) inp2, (v8u16) inp0); \
|
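The CMP_AND_SELECT macro above, with its unsigned less-than compares, is characteristic of the minimum-distance selection used by libpng's Paeth read filter. Purely as an illustration of that selection step (and not of the macro itself), here is the standard scalar Paeth predictor from the PNG specification:

    #include <stdlib.h>

    /* Standard scalar Paeth predictor: choose whichever of left (a), above (b),
     * or upper-left (c) is closest to p = a + b - c, with ties broken in the
     * order a, b, c.  The MSA path vectorises this compare-and-select. */
    static unsigned char paeth_predictor(unsigned char a, unsigned char b,
                                         unsigned char c) {
      int p = (int)a + (int)b - (int)c;
      int pa = abs(p - (int)a);
      int pb = abs(p - (int)b);
      int pc = abs(p - (int)c);
      if (pa <= pb && pa <= pc) return a;
      if (pb <= pc) return b;
      return c;
    }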
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | row_msa.cc |
    2030  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RGB24ToUVRow_MSA() local
    2046  inp2 = (v16u8)__msa_ld_b((const v16i8*)s, 32);  in RGB24ToUVRow_MSA()
    2052  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RGB24ToUVRow_MSA()
    2054  src3 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp2, 4);  in RGB24ToUVRow_MSA()
    2134  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RAWToUVRow_MSA() local
    2151  inp2 = (v16u8)__msa_ld_b((const v16i8*)s, 32);  in RAWToUVRow_MSA()
    2157  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RAWToUVRow_MSA()
    2159  src3 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp2, 4);  in RAWToUVRow_MSA()
|
/external/tensorflow/tensorflow/python/keras/engine/ |
D | topology_test.py |
    363  inp2 = keras.layers.Input(shape=(2,))
    364  out2 = model(inp2)
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | vector-shuffle-combining-avx2.ll |
    951  define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1, <8 x float> %inp2…
    972  …%shuf0 = shufflevector <8 x float> %inp0, <8 x float> %inp2, <8 x i32> <i32 1, i32 10, i32 11, i32…
|