/external/libvpx/libvpx/vpx_dsp/mips/ |
D | fwd_txfm_msa.c |
      32  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;  in fdct8x16_1d_column() local
      44  LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,  in fdct8x16_1d_column()
      48  SLLI_4V(in8, in9, in10, in11, 2);  in fdct8x16_1d_column()
      51  ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);  in fdct8x16_1d_column()
      56  SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8);  in fdct8x16_1d_column()
      74  BUTTERFLY_4(in8, in9, stp22, stp23, stp30, stp31, stp32, stp33);  in fdct8x16_1d_column()
      86  BUTTERFLY_4(stp30, stp37, stp26, stp21, in8, in15, in14, in9);  in fdct8x16_1d_column()
      87  ILVRL_H2_SH(in15, in8, vec1, vec0);  in fdct8x16_1d_column()
      91  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);  in fdct8x16_1d_column()
      92  ST_SH(in8, tmp_ptr);  in fdct8x16_1d_column()
      [all …]
|
D | fwd_dct32x32_msa.c |
      61  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;  in fdct8x32_1d_column_even_store() local
      71  LD_SH4(input + 64, 8, in8, in9, in10, in11);  in fdct8x32_1d_column_even_store()
      72  BUTTERFLY_8(in4, in5, in6, in7, in8, in9, in10, in11, vec4, vec5, vec6, vec7,  in fdct8x32_1d_column_even_store()
      73  in8, in9, in10, in11);  in fdct8x32_1d_column_even_store()
      104  ADD4(in8, vec3, in9, vec2, in14, vec5, in15, vec4, in0, vec1, vec6, in2);  in fdct8x32_1d_column_even_store()
      120  SUB4(in8, vec3, in15, vec4, in3, in2, in0, in1, in3, in0, vec2, vec5);  in fdct8x32_1d_column_even_store()
      257  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;  in fdct8x32_1d_row_load_butterfly() local
      261  LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct8x32_1d_row_load_butterfly()
      264  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,  in fdct8x32_1d_row_load_butterfly()
      266  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,  in fdct8x32_1d_row_load_butterfly()
      [all …]
|
D | deblock_msa.c |
      74  #define TRANSPOSE12x16_B(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \  argument
      86  ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \
      87  ILVR_B2_SH(in9, in8, in11, in10, temp4, temp5); \
      101  ILVL_B4_SH(in9, in8, in11, in10, in13, in12, in15, in14, temp2, temp3, \
      106  in8 = (v16u8)__msa_ilvr_d((v2i64)temp1, (v2i64)temp0); \
      113  #define VPX_TRANSPOSE12x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, \  argument
      134  in8 = (v16u8)temp6; \
|
D | macros_msa.h |
      1073  #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \  argument
      1079  ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
      1690  #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \  argument
      1702  out7 = in7 + in8; \
      1704  out8 = in7 - in8; \
      1742  #define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \  argument
      1749  ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
      (see the scalar BUTTERFLY_16 sketch after this directory listing)
|
D | vpx_convolve8_msa.c |
      829  v16u8 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11, in12;  in transpose16x16_to_dst() local
      834  LD_UB8(src + 16 * 8, 16, in8, in9, in10, in11, in12, in13, in14, in15);  in transpose16x16_to_dst()
      836  TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,  in transpose16x16_to_dst()
      844  SLDI_B4_0_UB(in8, in9, in10, in11, in8, in9, in10, in11, 8);  in transpose16x16_to_dst()
      847  TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10,  in transpose16x16_to_dst()
|
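The BUTTERFLY_16 macro listed under macros_msa.h above (hits 1690, 1702, 1704) pairs input i with input 15 - i and emits a sum and a difference, as the visible lines out7 = in7 + in8 and out8 = in7 - in8 suggest. A minimal scalar sketch of that pattern, assuming the same pairing for all 16 lanes; the helper name is hypothetical, plain int16_t values stand in for whole v8i16 vectors, and saturation is not modeled:

    #include <stdint.h>

    /* Scalar sketch: element i is added to and subtracted from element 15 - i. */
    static void butterfly16_sketch(const int16_t in[16], int16_t out[16]) {
      for (int i = 0; i < 8; ++i) {
        out[i] = (int16_t)(in[i] + in[15 - i]);      /* e.g. out7 = in7 + in8 */
        out[15 - i] = (int16_t)(in[i] - in[15 - i]); /* e.g. out8 = in7 - in8 */
      }
    }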
/external/libaom/libaom/av1/common/x86/ |
D | intra_edge_sse4.c |
      146  __m128i in8 = _mm_lddqu_si128((__m128i *)&in[8]);  in av1_filter_intra_edge_high_sse4_1() local
      149  __m128i in1 = _mm_alignr_epi8(in8, in0, 2);  in av1_filter_intra_edge_high_sse4_1()
      150  __m128i in2 = _mm_alignr_epi8(in8, in0, 4);  in av1_filter_intra_edge_high_sse4_1()
      166  in0 = in8;  in av1_filter_intra_edge_high_sse4_1()
      167  in8 = _mm_lddqu_si128((__m128i *)&in[8]);  in av1_filter_intra_edge_high_sse4_1()
      175  __m128i in8 = _mm_lddqu_si128((__m128i *)&in[8]);  in av1_filter_intra_edge_high_sse4_1() local
      178  __m128i in1 = _mm_alignr_epi8(in8, in0, 2);  in av1_filter_intra_edge_high_sse4_1()
      179  __m128i in2 = _mm_alignr_epi8(in8, in0, 4);  in av1_filter_intra_edge_high_sse4_1()
      180  __m128i in3 = _mm_alignr_epi8(in8, in0, 6);  in av1_filter_intra_edge_high_sse4_1()
      181  __m128i in4 = _mm_alignr_epi8(in8, in0, 8);  in av1_filter_intra_edge_high_sse4_1()
      [all …]
|
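In the intra_edge_sse4.c hits above, _mm_alignr_epi8(in8, in0, 2 * k) concatenates the two loaded registers and shifts the pair right by 2 * k bytes, which on 16-bit samples yields a window that starts k elements into in0 and spills over into in8. A rough scalar equivalent; the helper name is hypothetical and only illustrates the lane arithmetic, not the edge filter itself:

    #include <stdint.h>

    /* Scalar sketch: out[i] is element (i + k) of the 16-sample window
     * formed by in0 followed by in8, for k in 0..8. */
    static void alignr16_sketch(const uint16_t in0[8], const uint16_t in8[8],
                                int k, uint16_t out[8]) {
      uint16_t window[16];
      for (int i = 0; i < 8; ++i) {
        window[i] = in0[i];
        window[8 + i] = in8[i];
      }
      for (int i = 0; i < 8; ++i) out[i] = window[i + k];
    }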
/external/tensorflow/tensorflow/core/kernels/ |
D | aggregate_ops_gpu.cu.cc |
      110  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {  in operator ()()
      112  in7, in8);  in operator ()()
      123  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {  in operator ()()
      125  in7, in8);  in operator ()()
      136  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8,  in operator ()()
      139  in7, in8, in9);  in operator ()()
|
D | aggregate_ops_cpu.h |
      104  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {
      106  in7, in8);
      117  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {
      119  in7, in8);
      130  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8,
      133  in7, in8, in9);
|
D | aggregate_ops.h |
      161  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8);
      171  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {  in Compute()
      172  out.device(d) = in1 + in2 + in3 + in4 + in5 + in6 + in7 + in8;  in Compute()
      185  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8);
      195  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8) {  in Compute()
      196  out.device(d) += in1 + in2 + in3 + in4 + in5 + in6 + in7 + in8;  in Compute()
      207  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8,
      218  typename TTypes<T>::ConstFlat in7, typename TTypes<T>::ConstFlat in8,  in Compute()
      220  out.device(d) = in1 + in2 + in3 + in4 + in5 + in6 + in7 + in8 + in9;  in Compute()
|
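The aggregate_ops hits above show TensorFlow's Add8/Add9 functors evaluating out = in1 + ... + in8 (or accumulating with +=, or adding in9) elementwise through Eigen on the selected device. A plain-C sketch of the same reduction, with a hypothetical helper name and float data standing in for the templated flat tensors:

    #include <stddef.h>

    /* Scalar sketch: sum eight equally sized flat inputs into out. */
    static void add8_sketch(float *out, const float *const in[8], size_t n) {
      for (size_t i = 0; i < n; ++i) {
        float sum = 0.0f;
        for (int k = 0; k < 8; ++k) sum += in[k][i];
        out[i] = sum;
      }
    }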
/external/llvm-project/llvm/test/CodeGen/Hexagon/ |
D | circ_ldd_bug.ll |
      86  %var8.0.in8.unr = phi i8* [ %4, %unr.cmp ], [ %11, %for.body.unr ]
      88  %16 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr, i8* %3, i32 %or, i32 -8)
      100  %var8.0.in8.unr19 = phi i8* [ %4, %unr.cmp24 ], [ %16, %for.body.unr13 ]
      102  %21 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr19, i8* %3, i32 %or, i32 -8)
      114  %var8.0.in8.unr28 = phi i8* [ %4, %unr.cmp33 ], [ %21, %for.body.unr17 ]
      116  %26 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr28, i8* %3, i32 %or, i32 -8)
      128  %var8.0.in8.unr37 = phi i8* [ %4, %unr.cmp42 ], [ %26, %for.body.unr26 ]
      130  %31 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr37, i8* %3, i32 %or, i32 -8)
      142  %var8.0.in8.unr46 = phi i8* [ %4, %unr.cmp51 ], [ %31, %for.body.unr35 ]
      144  %36 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr46, i8* %3, i32 %or, i32 -8)
      [all …]
|
/external/llvm/test/CodeGen/Hexagon/ |
D | circ_ldd_bug.ll |
      86  %var8.0.in8.unr = phi i8* [ %4, %unr.cmp ], [ %11, %for.body.unr ]
      88  %16 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr, i8* %3, i32 %or, i32 -8)
      100  %var8.0.in8.unr19 = phi i8* [ %4, %unr.cmp24 ], [ %16, %for.body.unr13 ]
      102  %21 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr19, i8* %3, i32 %or, i32 -8)
      114  %var8.0.in8.unr28 = phi i8* [ %4, %unr.cmp33 ], [ %21, %for.body.unr17 ]
      116  %26 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr28, i8* %3, i32 %or, i32 -8)
      128  %var8.0.in8.unr37 = phi i8* [ %4, %unr.cmp42 ], [ %26, %for.body.unr26 ]
      130  %31 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr37, i8* %3, i32 %or, i32 -8)
      142  %var8.0.in8.unr46 = phi i8* [ %4, %unr.cmp51 ], [ %31, %for.body.unr35 ]
      144  %36 = call i8* @llvm.hexagon.circ.ldd(i8* %var8.0.in8.unr46, i8* %3, i32 %or, i32 -8)
      [all …]
|
/external/webp/src/dsp/ |
D | enc_sse41.c |
      210  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);  in DoQuantizeBlock_SSE41() local
      218  __m128i coeff8 = _mm_abs_epi16(in8);  in DoQuantizeBlock_SSE41()
      266  out8 = _mm_sign_epi16(out8, in8);  in DoQuantizeBlock_SSE41()
      270  in8 = _mm_mullo_epi16(out8, q8);  in DoQuantizeBlock_SSE41()
      273  _mm_storeu_si128((__m128i*)&in[8], in8);  in DoQuantizeBlock_SSE41()
|
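The enc_sse41.c hits above outline a quantize-in-place pattern for one group of eight coefficients: take the magnitude (_mm_abs_epi16), quantize it, restore the original sign (_mm_sign_epi16), then store back the dequantized value level * q (_mm_mullo_epi16). A scalar sketch of the sign handling only; quantize_abs below is a hypothetical placeholder for WebP's actual bias/shift quantizer, which is not reproduced here:

    #include <stdint.h>
    #include <stdlib.h>

    /* Hypothetical placeholder for the real quantizer on |coeff|; q > 0 assumed. */
    static int16_t quantize_abs(int16_t abs_coeff, int16_t q) {
      return (int16_t)(abs_coeff / q);
    }

    /* abs -> quantize -> re-apply sign -> write back level * q. */
    static int16_t quant_dequant_sketch(int16_t *coeff, int16_t q) {
      int16_t level = quantize_abs((int16_t)abs(*coeff), q);
      if (*coeff < 0) level = (int16_t)-level; /* _mm_sign_epi16 analogue */
      *coeff = (int16_t)(level * q);           /* _mm_mullo_epi16 analogue */
      return level;
    }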
/external/libvpx/libvpx/vp9/encoder/mips/msa/ |
D | vp9_fdct16x16_msa.c |
      369  v8i16 in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11;  in postproc_fdct16x8_1d_row() local
      374  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);  in postproc_fdct16x8_1d_row()
      377  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,  in postproc_fdct16x8_1d_row()
      383  FDCT_POSTPROC_2V_NEG_H(in8, in9);  in postproc_fdct16x8_1d_row()
      387  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,  in postproc_fdct16x8_1d_row()
      389  tmp7, in8, in9, in10, in11, in12, in13, in14, in15);  in postproc_fdct16x8_1d_row()
      391  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);  in postproc_fdct16x8_1d_row()
      395  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);  in postproc_fdct16x8_1d_row()
      396  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,  in postproc_fdct16x8_1d_row()
|
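In postproc_fdct16x8_1d_row above, the row pass loads eight v8i16 vectors, runs TRANSPOSE8x8_SH_SH so the transform can proceed along the other dimension, post-processes the values, and feeds them to BUTTERFLY_16. A scalar sketch of just the 8x8 transpose step; the helper is hypothetical and works on arrays rather than the registers the MSA macro uses:

    #include <stdint.h>

    /* Scalar sketch of an 8x8 transpose of 16-bit values. */
    static void transpose8x8_sketch(const int16_t in[8][8], int16_t out[8][8]) {
      for (int r = 0; r < 8; ++r)
        for (int c = 0; c < 8; ++c) out[c][r] = in[r][c];
    }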
/external/llvm-project/llvm/test/Transforms/Reassociate/ |
D | long-chains.ll |
      4  define i8 @longchain(i8 %in1, i8 %in2, i8 %in3, i8 %in4, i8 %in5, i8 %in6, i8 %in7, i8 %in8, i8 %in…
      13  %tmp9 = add i8 %tmp8, %in8
|
/external/llvm-project/llvm/test/Transforms/LICM/AArch64/ |
D | sve-load-hoist.ll |
      3  define void @no_hoist_load1_nxv2i64(<vscale x 2 x i64>* %out, i8* %in8, i32 %n) {
      11  %invst = call {}* @llvm.invariant.start.p0i8(i64 16, i8* %in8)
      12  %in = bitcast i8* %in8 to <vscale x 2 x i64>*
|
/external/llvm-project/llvm/test/CodeGen/AMDGPU/ |
D | amdhsa-trap-num-sgprs.ll |
      16  i32 addrspace(1)* %out8, i32 %in8,
      47  store i32 %in8, i32 addrspace(1)* %out8
|
D | hsa-metadata-kernel-code-props.ll |
      74  i32 %in8, i32 %in9, i32 %ina, i32 %inb, [8 x i32],
      85  store i32 %in8, i32 addrspace(1)* %out8
|
D | hsa-metadata-kernel-code-props-v3.ll |
      66  i32 %in8, i32 %in9, i32 %ina, i32 %inb, [8 x i32],
      77  store i32 %in8, i32 addrspace(1)* %out8
|
/external/angle/third_party/vulkan-deps/glslang/src/Test/ |
D | decls.frag |
      17  int in8[4] = int[](21, 22, 23, 24), ip;
|
/external/deqp-deps/glslang/Test/ |
D | decls.frag |
      17  int in8[4] = int[](21, 22, 23, 24), ip;
|
/external/tensorflow/tensorflow/compiler/mlir/lite/tests/ |
D | load-quantization-recipe.mlir |
      33  // CHECK-NEXT: %[[in8:.*]] = "tfl.logistic"(%[[in7]])
      72  // CHECK-NEXT: %[[ac2:.*]] = tfl.mul %[[in8]], %[[ce7]]
|
/external/deqp-deps/glslang/Test/baseResults/ |
D | decls.frag.out |
      78  0:17 'in8' ( global 4-element array of int)
      204  0:? 'in8' ( global 4-element array of int)
      323  0:17 'in8' ( global 4-element array of int)
      449  0:? 'in8' ( global 4-element array of int)
|
/external/angle/third_party/vulkan-deps/glslang/src/Test/baseResults/ |
D | decls.frag.out |
      78  0:17 'in8' ( global 4-element array of int)
      204  0:? 'in8' ( global 4-element array of int)
      323  0:17 'in8' ( global 4-element array of int)
      449  0:? 'in8' ( global 4-element array of int)
|
/external/libvpx/libvpx/vpx_dsp/ppc/ |
D | inv_txfm_vsx.c |
      427  #define IDCT16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, inA, inB, \  argument
      432  out1 = in8; \
      458  STEP8_0(out8, outF, in8, inF, cospi30_v, cospi2_v); \
      472  out8 = vec_add(in8, in9); \
      473  out9 = vec_sub(in8, in9); \
      489  in8 = out8; \
      531  out8 = vec_add(in8, inB); \
      534  outB = vec_sub(in8, inB); \
      549  in8 = out8; \
      564  out7 = vec_add(in7, in8); \
      [all …]
|
/external/libaom/libaom/aom_dsp/mips/ |
D | macros_msa.h |
      1184  #define ILVR_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \  argument
      1190  ILVR_B4(RTYPE, in8, in9, in10, in11, in12, in13, in14, in15, out4, out5, \
      1775  #define BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, \  argument
      1787  out7 = in7 + in8; \
      1789  out8 = in7 - in8; \
      1827  #define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, \  argument
      1834  ILVEV_D2_UB(in0, in8, in1, in9, out7, out6); \
|