Searched refs:tu0 (Results 1 – 6 of 6) sorted by relevance

/external/libaom/libaom/av1/common/arm/
mem_neon.h
319 uint32x2_t *tu0, uint32x2_t *tu1, in load_unaligned_u8_4x8() argument
325 *tu0 = vset_lane_u32(a, *tu0, 0); in load_unaligned_u8_4x8()
328 *tu0 = vset_lane_u32(a, *tu0, 1); in load_unaligned_u8_4x8()
349 uint32x2_t *tu0, uint32x2_t *tu1) { in load_unaligned_u8_4x4() argument
354 *tu0 = vset_lane_u32(a, *tu0, 0); in load_unaligned_u8_4x4()
357 *tu0 = vset_lane_u32(a, *tu0, 1); in load_unaligned_u8_4x4()
366 uint32x2_t *tu0) { in load_unaligned_u8_4x1() argument
371 *tu0 = vset_lane_u32(a, *tu0, 0); in load_unaligned_u8_4x1()
375 uint32x2_t *tu0) { in load_unaligned_u8_4x2() argument
380 *tu0 = vset_lane_u32(a, *tu0, 0); in load_unaligned_u8_4x2()
[all …]
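
The load_unaligned_u8_* helpers above all share one pattern: each 4-byte row is copied out of a possibly unaligned, strided buffer (typically via memcpy, which compiles to a plain unaligned load), then packed into the lanes of a 64-bit D register with vset_lane_u32. A minimal sketch of the 4x4 variant, reconstructed from the matched lines (the glue between the matches is an assumption):

  #include <arm_neon.h>
  #include <stdint.h>
  #include <string.h>

  /* Sketch: load four unaligned 4-byte rows into two uint32x2_t,
   * two rows per register, one row per 32-bit lane. */
  static inline void load_unaligned_u8_4x4(const uint8_t *buf, int stride,
                                           uint32x2_t *tu0, uint32x2_t *tu1) {
    uint32_t a;
    memcpy(&a, buf, 4);               /* row 0 -> tu0 lane 0 */
    buf += stride;
    *tu0 = vset_lane_u32(a, *tu0, 0);
    memcpy(&a, buf, 4);               /* row 1 -> tu0 lane 1 */
    buf += stride;
    *tu0 = vset_lane_u32(a, *tu0, 1);
    memcpy(&a, buf, 4);               /* row 2 -> tu1 lane 0 */
    buf += stride;
    *tu1 = vset_lane_u32(a, *tu1, 0);
    memcpy(&a, buf, 4);               /* row 3 -> tu1 lane 1 */
    *tu1 = vset_lane_u32(a, *tu1, 1);
  }

The 4x8, 4x2, and 4x1 variants differ only in how many rows they read and how many uint32x2_t outputs they fill.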
jnt_convolve_neon.c
942 uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0); in av1_dist_wtd_convolve_x_neon() local
981 load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1); in av1_dist_wtd_convolve_x_neon()
982 t0 = vreinterpret_u8_u32(tu0); in av1_dist_wtd_convolve_x_neon()
1402 uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0), tu2 = vdup_n_u32(0), in av1_dist_wtd_convolve_y_neon() local
1426 load_unaligned_u8_4x8(s, src_stride, &tu0, &tu1, &tu2, &tu3); in av1_dist_wtd_convolve_y_neon()
1428 u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0))); in av1_dist_wtd_convolve_y_neon()
1449 load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1); in av1_dist_wtd_convolve_y_neon()
1451 u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0))); in av1_dist_wtd_convolve_y_neon()
1518 load_unaligned_u8_4x1(s, src_stride, &tu0); in av1_dist_wtd_convolve_y_neon()
1519 u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0))); in av1_dist_wtd_convolve_y_neon()
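
In both convolve functions the pattern is the same: the lane carriers are zeroed with vdup_n_u32(0) so unfilled lanes stay defined, the unaligned loader fills them, and the packed bytes are reinterpreted and widened to signed 16-bit lanes for the filter taps. A hedged sketch of that load-and-widen step (load_unaligned_u8_4x4 comes from mem_neon.h above; the helper name load_4x4_as_s16 is invented for illustration, and the actual filtering is omitted):

  #include <arm_neon.h>
  #include <stdint.h>
  #include "av1/common/arm/mem_neon.h"  /* load_unaligned_u8_4x4, per the hits above */

  /* Sketch: four unaligned 4-pixel rows in, two s16x8 vectors out. */
  static inline void load_4x4_as_s16(const uint8_t *s, int src_stride,
                                     int16x8_t *u0, int16x8_t *u1) {
    uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0);
    load_unaligned_u8_4x4(s, src_stride, &tu0, &tu1);
    /* Each uint32x2_t holds two rows; widen u8 -> u16, reinterpret as s16. */
    *u0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
    *u1 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu1)));
  }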
/external/libaom/libaom/aom_dsp/arm/
blend_a64_mask_neon.c
89 uint64x2_t tu0 = vdupq_n_u64(0), tu1 = vdupq_n_u64(0), tu2 = vdupq_n_u64(0), in blend_4x4() local
94 load_unaligned_u16_4x4(src0, src0_stride, &tu0, &tu1); in blend_4x4()
97 src0_0 = vreinterpretq_s16_u64(tu0); in blend_4x4()
158 uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0), tu2 = vdup_n_u32(0), in aom_lowbd_blend_a64_d16_mask_neon() local
204 load_unaligned_u8_4x4(mask_tmp, mask_stride, &tu0, &tu1); in aom_lowbd_blend_a64_d16_mask_neon()
206 mask0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0))); in aom_lowbd_blend_a64_d16_mask_neon()
419 load_unaligned_u8_4x4(mask_tmp, 2 * mask_stride, &tu0, &tu1); in aom_lowbd_blend_a64_d16_mask_neon()
423 s0 = vreinterpret_u8_u32(tu0); in aom_lowbd_blend_a64_d16_mask_neon()
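
Here the same unaligned-load trick feeds AV1's 6-bit alpha blend: each output pixel is (mask*src0 + (64 - mask)*src1 + 32) >> 6, i.e. AOM_BLEND_A64 with a maximum alpha of 64. A sketch of that per-lane blend for one 8-pixel vector, assuming plain 8-bit sources (the d16 variants in the hits work on 16-bit intermediates instead; blend_a64_u8x8 is an illustrative name):

  #include <arm_neon.h>

  /* Sketch: res = (m*a + (64 - m)*b + 32) >> 6, eight pixels at a time. */
  static inline uint8x8_t blend_a64_u8x8(uint8x8_t m, uint8x8_t a, uint8x8_t b) {
    const uint8x8_t max_alpha = vdup_n_u8(64);
    uint16x8_t acc = vmull_u8(m, a);                /* mask * src0        */
    acc = vmlal_u8(acc, vsub_u8(max_alpha, m), b);  /* + (64-mask) * src1 */
    return vrshrn_n_u16(acc, 6);                    /* +32, >>6, narrow   */
  }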
/external/webp/src/dsp/
upsampling_msa.c
589 v16u8 tu0, tu1, tv0, tv1, cu0, cu1, cv0, cv1; \
590 LD_UB2(top_u, 1, tu0, tu1); \
594 UPSAMPLE_32PIXELS(tu0, tu1, cu0, cu1); \
596 ST_UB4(tu0, tu1, cu0, cu1, &temp_u[0], 16); \
613 v16u8 tu0, tu1, tv0, tv1, cu0, cu1, cv0, cv1; \
618 LD_UB2(&temp_u[ 0], 1, tu0, tu1); \
622 UPSAMPLE_32PIXELS(tu0, tu1, cu0, cu1); \
624 ST_UB4(tu0, tu1, cu0, cu1, &temp_u[0], 16); \
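
In the WebP MSA code, tu0/tu1 carry the top chroma row and cu0/cu1 the current one; UPSAMPLE_32PIXELS interpolates between them. The scalar arithmetic those macros vectorise is WebP's "fancy" 2x chroma upsampler, a 9:3:3:1 blend of the four nearest chroma samples; a minimal scalar sketch, with the kernel weights stated as an assumption rather than a transcription of the macro:

  #include <stdint.h>

  /* Sketch: one upsampled chroma sample from its four neighbours;
   * a = nearest, b/c = horizontal/vertical neighbours, d = diagonal. */
  static inline uint8_t fancy_upsample_pixel(uint8_t a, uint8_t b,
                                             uint8_t c, uint8_t d) {
    return (uint8_t)((9 * a + 3 * b + 3 * c + d + 8) >> 4);
  }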
/external/honggfuzz/examples/apache-httpd/corpus_http1/
29cd5131fbc89870caa99aba341f1445.00018f48.honggfuzz.cov
269 (binary fuzzing-corpus data; the match context is not printable text)
/external/honggfuzz/examples/apache-httpd/corpus_http2/
29cd5131fbc89870caa99aba341f1445.00018f48.honggfuzz.cov
269 (binary fuzzing-corpus data; the match context is not printable text)