/external/libxaac/decoder/ |
D | ixheaacd_qmf_dec.c |
    361  VOID ixheaacd_sbr_qmfanal32_winadd_eld(WORD16 *inp1, WORD16 *inp2,  in ixheaacd_sbr_qmfanal32_winadd_eld() argument
    368    accu = ixheaacd_mult16x16in32(inp1[n + 0], p_qmf1[(n + 0)]);  in ixheaacd_sbr_qmfanal32_winadd_eld()
    370      accu, ixheaacd_mult16x16in32(inp1[n + 64], p_qmf1[(n + 64)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    372      accu, ixheaacd_mult16x16in32(inp1[n + 128], p_qmf1[(n + 128)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    374      accu, ixheaacd_mult16x16in32(inp1[n + 192], p_qmf1[(n + 192)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    376      accu, ixheaacd_mult16x16in32(inp1[n + 256], p_qmf1[(n + 256)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    379    accu = ixheaacd_mult16x16in32(inp1[n + 1 + 0], p_qmf1[(n + 1 + 0)]);  in ixheaacd_sbr_qmfanal32_winadd_eld()
    381      accu, ixheaacd_mult16x16in32(inp1[n + 1 + 64], p_qmf1[(n + 1 + 64)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    383      accu, ixheaacd_mult16x16in32(inp1[n + 1 + 128], p_qmf1[(n + 1 + 128)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    385      accu, ixheaacd_mult16x16in32(inp1[n + 1 + 192], p_qmf1[(n + 1 + 192)]));  in ixheaacd_sbr_qmfanal32_winadd_eld()
    [all …]
|
D | ixheaacd_qmf_dec.h |
    86   VOID ixheaacd_esbr_qmfanal32_winadd(WORD32 *inp1, WORD32 *inp2,
    181  VOID ixheaacd_sbr_qmfsyn64_winadd(WORD16 *tmp1, WORD16 *tmp2, WORD16 *inp1,
    201  VOID ixheaacd_sbr_qmfanal32_winadd_eld(WORD16 *inp1, WORD16 *inp2,
|
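The libxaac matches above only show scattered lines of the QMF analysis window-add loop. As a reading aid, the following is a minimal scalar sketch of the five-tap, 64-sample-strided accumulation pattern those snippets suggest; the function name, buffer sizes and the local mult16x16in32 helper are illustrative assumptions, not the real libxaac interfaces.

    /* Hypothetical scalar sketch of the window-add accumulation seen in
     * ixheaacd_sbr_qmfanal32_winadd_eld(): each of the 64 outputs sums five
     * 16x16->32-bit products taken 64 samples apart in the input and window. */
    #include <stdint.h>

    static int32_t mult16x16in32(int16_t a, int16_t b) { return (int32_t)a * b; }

    static void qmf_anal_winadd_sketch(const int16_t *inp, const int16_t *win,
                                       int32_t *out) {
      int n, tap;
      for (n = 0; n < 64; n++) {
        int32_t accu = 0;
        for (tap = 0; tap < 5; tap++)            /* offsets 0, 64, ..., 256 */
          accu += mult16x16in32(inp[n + 64 * tap], win[n + 64 * tap]);
        out[n] = accu;
      }
    }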
/external/tensorflow/tensorflow/core/kernels/ |
D | sparse_matmul_op.cc |
    320    const float a3, const bfloat16** inp1,  in ScalarMulAdd3Way() argument
    323    float inp1_f = ConvertBfloat16ToFloat(*inp1);  in ScalarMulAdd3Way()
    328    ++*inp1;  in ScalarMulAdd3Way()
    334    const float a3, const float** inp1,  in ScalarMulAdd3Way() argument
    337    **out += a1 * **inp1 + a2 * **inp2 + a3 * **inp3;  in ScalarMulAdd3Way()
    339    ++*inp1;  in ScalarMulAdd3Way()
    429    auto inp1 = reinterpret_cast<const float*>(*binp1);  in MulAdd3Way() local
    434    const auto b1 = LOAD(inp1);  in MulAdd3Way()
    462    auto inp1 = reinterpret_cast<const float*>(*binp1);  in TwoMulAdd3Way() local
    467    const auto b1 = LOAD(inp1);  in TwoMulAdd3Way()
    [all …]
|
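The ScalarMulAdd3Way() hits reduce to a three-way multiply-accumulate over advancing pointers. A rough stand-alone rendering in plain C (the real TensorFlow code is templated C++ with a bfloat16 overload and SIMD MulAdd3Way/TwoMulAdd3Way companions; the name below is only for illustration):

    /* Sketch of the scalar pattern visible above:
     * *out += a1 * *inp1 + a2 * *inp2 + a3 * *inp3, then every cursor advances.
     * Pointer-to-pointer parameters let the caller's cursors move as well. */
    static void scalar_mul_add_3way(float a1, float a2, float a3,
                                    const float **inp1, const float **inp2,
                                    const float **inp3, float **out) {
      **out += a1 * **inp1 + a2 * **inp2 + a3 * **inp3;
      ++*inp1;
      ++*inp2;
      ++*inp3;
      ++*out;
    }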
/external/libxaac/decoder/generic/ |
D | ixheaacd_qmf_dec_generic.c |
    506  VOID ixheaacd_sbr_qmfanal32_winadd(WORD16 *inp1, WORD16 *inp2, WORD16 *p_qmf1,  in ixheaacd_sbr_qmfanal32_winadd() argument
    513    accu = ixheaacd_mult16x16in32(inp1[n + 0], p_qmf1[2 * (n + 0)]);  in ixheaacd_sbr_qmfanal32_winadd()
    515      accu, ixheaacd_mult16x16in32(inp1[n + 64], p_qmf1[2 * (n + 64)]));  in ixheaacd_sbr_qmfanal32_winadd()
    517      accu, ixheaacd_mult16x16in32(inp1[n + 128], p_qmf1[2 * (n + 128)]));  in ixheaacd_sbr_qmfanal32_winadd()
    519      accu, ixheaacd_mult16x16in32(inp1[n + 192], p_qmf1[2 * (n + 192)]));  in ixheaacd_sbr_qmfanal32_winadd()
    521      accu, ixheaacd_mult16x16in32(inp1[n + 256], p_qmf1[2 * (n + 256)]));  in ixheaacd_sbr_qmfanal32_winadd()
    524    accu = ixheaacd_mult16x16in32(inp1[n + 1 + 0], p_qmf1[2 * (n + 1 + 0)]);  in ixheaacd_sbr_qmfanal32_winadd()
    527      ixheaacd_mult16x16in32(inp1[n + 1 + 64], p_qmf1[2 * (n + 1 + 64)]));  in ixheaacd_sbr_qmfanal32_winadd()
    530      ixheaacd_mult16x16in32(inp1[n + 1 + 128], p_qmf1[2 * (n + 1 + 128)]));  in ixheaacd_sbr_qmfanal32_winadd()
    533      ixheaacd_mult16x16in32(inp1[n + 1 + 192], p_qmf1[2 * (n + 1 + 192)]));  in ixheaacd_sbr_qmfanal32_winadd()
    [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | qpx-bv-sint.ll |
    5   define void @s452(i32 %inp1) nounwind {
    10  %conv.4 = sitofp i32 %inp1 to double
    11  %conv.5 = sitofp i32 %inp1 to double
|
D | qpx-split-vsetcc.ll |
    6   define void @gsl_sf_legendre_Pl_deriv_array(<4 x i32> %inp1, <4 x double> %inp2) #0 {
    11  %0 = icmp ne <4 x i32> %inp1, zeroinitializer
|
D | subreg-postra.ll |
    7    define void @jbd2_journal_commit_transaction(i32* %journal, i64 %inp1, i32 %inp2,
    111  %tobool.i1295 = icmp eq i64 %inp1, 0
    130  %tobool.i1316 = icmp eq i64 %inp1, 0
|
/external/llvm/test/CodeGen/PowerPC/ |
D | qpx-bv-sint.ll |
    5   define void @s452(i32 %inp1) nounwind {
    10  %conv.4 = sitofp i32 %inp1 to double
    11  %conv.5 = sitofp i32 %inp1 to double
|
D | qpx-split-vsetcc.ll |
    6   define void @gsl_sf_legendre_Pl_deriv_array(<4 x i32> %inp1, <4 x double> %inp2) #0 {
    11  %0 = icmp ne <4 x i32> %inp1, zeroinitializer
|
D | subreg-postra.ll |
    6    define void @jbd2_journal_commit_transaction(i32* %journal, i64 %inp1, i32 %inp2,
    110  %tobool.i1295 = icmp eq i64 %inp1, 0
    129  %tobool.i1316 = icmp eq i64 %inp1, 0
|
/external/libpng/mips/ |
D | filter_msa_intrinsics.c |
    352  #define CMP_AND_SELECT(inp0, inp1, inp2, inp3, inp4, inp5, out0) \  argument
    356    _sel_h0 = (v8i16) __msa_clt_u_h((v8u16) inp1, (v8u16) inp0); \
    358    inp0 = (v8i16) __msa_bmnz_v((v16u8) inp0, (v16u8) inp1, (v16u8) _sel_h0); \
    549  int32_t inp0, inp1, out0;  in png_read_filter_row_avg4_msa() local
    555  inp1 = LW(src);  in png_read_filter_row_avg4_msa()
    558  src1 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp1);  in png_read_filter_row_avg4_msa()
    601  int32_t inp0, inp1, out1;  in png_read_filter_row_avg3_msa() local
    610  inp1 = LW(src);  in png_read_filter_row_avg3_msa()
    613  src1 = (v16u8) __msa_insert_w((v4i32) zero, 0, inp1);  in png_read_filter_row_avg3_msa()
    660  int32_t inp0, inp1, res0;  in png_read_filter_row_paeth4_msa() local
    [all …]
|
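png_read_filter_row_avg4_msa and the related avg3/paeth routines are MSA implementations of the standard PNG defilters. For orientation, a scalar version of the "Average" reconstruction as defined by the PNG spec (simplified, not libpng's actual MSA code; the function name is made up) looks like this:

    /* PNG "Average" defilter: Recon(x) = Filt(x) + floor((left + up) / 2),
     * where left is the reconstructed byte bpp positions back in this row and
     * up is the byte directly above in the previous row (0 on the first row). */
    #include <stdint.h>
    #include <stddef.h>

    static void png_defilter_avg_sketch(uint8_t *row, const uint8_t *prev_row,
                                        size_t rowbytes, size_t bpp) {
      size_t i;
      for (i = 0; i < rowbytes; i++) {
        unsigned left = (i >= bpp) ? row[i - bpp] : 0u;
        unsigned up = (prev_row != NULL) ? prev_row[i] : 0u;
        row[i] = (uint8_t)(row[i] + ((left + up) >> 1));
      }
    }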
/external/tensorflow/tensorflow/contrib/batching/python/ops/ |
D | batch_ops_test.py |
    118  inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[1])
    120  [inp0, inp1],
    131  inp1: [2]}))
    135  main_results = sess.run([batched], feed_dict={inp0: [2], inp1: [3]})
    157  inp1 = array_ops.placeholder(dtype=dtypes.int32, shape=[2])
    159  [inp0, inp1], num_batch_threads=1, max_batch_size=2,
    162  _ = sess.run([batched, index], feed_dict={inp0: [0], inp1: [1, 2]})
|
/external/tensorflow/tensorflow/python/kernel_tests/ |
D | identity_n_op_py_test.py |
    45  inp1 = constant_op.constant([11, 21, 31, 41, 51, 61], shape=[3, 2])
    49  value2] = sess.run(array_ops.identity_n([inp0, inp1, inp2]))
|
D | rnn_cell_test.py |
    1796  for inp1, inp2 in zip(flat_input1, flat_input2):
    1797  input_shape = inp1.get_shape().as_list()
|
/external/libaom/libaom/aom_dsp/mips/ |
D | intrapred_msa.c |
    106  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_16x16_msa() local
    111  inp1 = src[1];  in intra_predict_horiz_16x16_msa()
    117  src1 = (v16u8)__msa_fill_b(inp1);  in intra_predict_horiz_16x16_msa()
    129  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_32x32_msa() local
    134  inp1 = src[1];  in intra_predict_horiz_32x32_msa()
    140  src1 = (v16u8)__msa_fill_b(inp1);  in intra_predict_horiz_32x32_msa()
|
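The intra_predict_horiz_* matches read one left-column pixel per row (inp0..inp3) and splat it across the row with __msa_fill_b. A scalar sketch of horizontal intra prediction, stripped of the MSA vector types (function name and parameters are assumed for illustration):

    /* Horizontal intra prediction: row y of the bs x bs block is the
     * left-neighbour pixel left[y] replicated across the whole row. */
    #include <stdint.h>
    #include <string.h>

    static void intra_predict_horiz_sketch(const uint8_t *left, uint8_t *dst,
                                           int stride, int bs) {
      int y;
      for (y = 0; y < bs; y++) {
        memset(dst, left[y], (size_t)bs);   /* splat one pixel across the row */
        dst += stride;
      }
    }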
/external/libxaac/decoder/armv8/ |
D | ixheaacd_qmf_dec_armv8.c |
    1144  VOID ixheaacd_esbr_qmfsyn64_winadd(WORD32 *tmp1, WORD32 *tmp2, WORD32 *inp1,  in ixheaacd_esbr_qmfsyn64_winadd() argument
    1152    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp1[0 + k], inp1[k + 0]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1154    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp1[256 + k], inp1[k + 128]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1156    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp1[512 + k], inp1[k + 256]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1158    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp1[768 + k], inp1[k + 384]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1160    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp1[1024 + k], inp1[k + 512]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1163    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp2[128 + k], inp1[k + 64]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1165    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp2[384 + k], inp1[k + 192]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1167    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp2[640 + k], inp1[k + 320]));  in ixheaacd_esbr_qmfsyn64_winadd()
    1169    ixheaacd_add64(syn_out, ixheaacd_mult64(tmp2[896 + k], inp1[k + 448]));  in ixheaacd_esbr_qmfsyn64_winadd()
    [all …]
|
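On the synthesis side, ixheaacd_esbr_qmfsyn64_winadd() accumulates 32x32-bit products in a 64-bit sum, with taps spaced 128 apart in inp1 and 256 apart in tmp1, and the tmp2 taps interleaved half-way between them. A hypothetical scalar outline of that pattern (the tap count beyond the visible lines, the output scaling, and all names are assumptions, not the libxaac code):

    /* 64-bit window-add sketch: per output k, sum products from tmp1 and tmp2
     * against strided slices of inp1, then renormalise (shift is assumed). */
    #include <stdint.h>

    static void qmf_syn_winadd_sketch(const int32_t *tmp1, const int32_t *tmp2,
                                      const int32_t *inp1, int32_t *out) {
      int k, t;
      for (k = 0; k < 64; k++) {
        int64_t syn = 0;
        for (t = 0; t < 5; t++) {
          syn += (int64_t)tmp1[256 * t + k] * inp1[k + 128 * t];
          syn += (int64_t)tmp2[256 * t + 128 + k] * inp1[k + 128 * t + 64];
        }
        out[k] = (int32_t)(syn >> 31);   /* assumed fixed-point renormalisation */
      }
    }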
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | txfm_macros_msa.h |
    82  #define MADD_BF(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \  argument
    88    ILVRL_H2_SH(inp1, inp0, madd_s0_m, madd_s1_m); \
|
D | intrapred_msa.c |
    104  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_16x16_msa() local
    109  inp1 = src[1];  in intra_predict_horiz_16x16_msa()
    115  src1 = (v16u8)__msa_fill_b(inp1);  in intra_predict_horiz_16x16_msa()
    127  uint8_t inp0, inp1, inp2, inp3;  in intra_predict_horiz_32x32_msa() local
    132  inp1 = src[1];  in intra_predict_horiz_32x32_msa()
    138  src1 = (v16u8)__msa_fill_b(inp1);  in intra_predict_horiz_32x32_msa()
|
D | inv_txfm_msa.h |
    195  #define VP9_MADD(inp0, inp1, inp2, inp3, cst0, cst1, cst2, cst3, out0, out1, \  argument
    201    ILVRL_H2_SH(inp1, inp0, madd_s1_m, madd_s0_m); \
|
/external/tensorflow/tensorflow/python/keras/ |
D | callbacks_v1_test.py |
    187  inp1 = keras.Input((INPUT_DIM,))
    189  inp = keras.layers.add([inp1, inp2])
    194  model = keras.models.Model([inp1, inp2], [output1, output2])
|
/external/libxaac/decoder/armv7/ |
D | ixheaacd_esbr_qmfsyn64_winadd.s |
    4  @WORD32 *inp1,
    9  @R2->Word32 *inp1
|
/external/libyuv/files/source/ |
D | row_msa.cc |
    2018  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RGB24ToUVRow_MSA() local
    2033  inp1 = (v16u8)__msa_ld_b((v16i8*)s, 16);  in RGB24ToUVRow_MSA()
    2038  src1 = (v16u8)__msa_sldi_b((v16i8)inp1, (v16i8)inp0, 12);  in RGB24ToUVRow_MSA()
    2040  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RGB24ToUVRow_MSA()
    2122  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RAWToUVRow_MSA() local
    2138  inp1 = (v16u8)__msa_ld_b((v16i8*)s, 16);  in RAWToUVRow_MSA()
    2143  src1 = (v16u8)__msa_sldi_b((v16i8)inp1, (v16i8)inp0, 12);  in RAWToUVRow_MSA()
    2145  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RAWToUVRow_MSA()
|
/external/libvpx/libvpx/third_party/libyuv/source/ |
D | row_msa.cc |
    2030  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RGB24ToUVRow_MSA() local
    2045  inp1 = (v16u8)__msa_ld_b((const v16i8*)s, 16);  in RGB24ToUVRow_MSA()
    2050  src1 = (v16u8)__msa_sldi_b((v16i8)inp1, (v16i8)inp0, 12);  in RGB24ToUVRow_MSA()
    2052  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RGB24ToUVRow_MSA()
    2134  v16u8 inp0, inp1, inp2, inp3, inp4, inp5;  in RAWToUVRow_MSA() local
    2150  inp1 = (v16u8)__msa_ld_b((const v16i8*)s, 16);  in RAWToUVRow_MSA()
    2155  src1 = (v16u8)__msa_sldi_b((v16i8)inp1, (v16i8)inp0, 12);  in RAWToUVRow_MSA()
    2157  src2 = (v16u8)__msa_sldi_b((v16i8)inp2, (v16i8)inp1, 8);  in RAWToUVRow_MSA()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | vector-shuffle-combining-avx2.ll |
    951  define internal fastcc <8 x float> @PR34577(<8 x float> %inp0, <8 x float> %inp1, <8 x float> %inp2…
    975  …%shuf2 = shufflevector <8 x float> %inp1, <8 x float> %shuf1, <8 x i32> <i32 15, i32 10, i32 7, i3…
|