/external/libhevc/common/arm/
ihevc_inter_pred_chroma_copy_w16out.s
  139  vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
  141  vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
  145  vld1.8 {d22},[r5],r2 @vld1_u8(pu1_src_tmp)
  149  vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
  150  vld1.8 {d24},[r5],r2 @vld1_u8(pu1_src_tmp)
  152  vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
  155  vld1.8 {d26},[r5],r2 @vld1_u8(pu1_src_tmp)
  157  vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp)
  181  vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
  183  vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
  [all …]
ihevc_inter_pred_luma_copy_w16out.s
  107  vld1.8 {d0},[r0] @vld1_u8(pu1_src_tmp)
  109  vmovl.u8 q0,d0 @vmovl_u8(vld1_u8(pu1_src_tmp)
  113  vld1.8 {d22},[r5],r2 @vld1_u8(pu1_src_tmp)
  117  vmovl.u8 q11,d22 @vmovl_u8(vld1_u8(pu1_src_tmp)
  118  vld1.8 {d24},[r5],r2 @vld1_u8(pu1_src_tmp)
  120  vmovl.u8 q12,d24 @vmovl_u8(vld1_u8(pu1_src_tmp)
  123  vld1.8 {d26},[r5],r2 @vld1_u8(pu1_src_tmp)
  125  vmovl.u8 q13,d26 @vmovl_u8(vld1_u8(pu1_src_tmp)
  154  vld1.8 {d8},[r0]! @vld1_u8(pu1_src_tmp)
  155  vld1.8 {d10},[r6],r2 @vld1_u8(pu1_src_tmp)
  [all …]
ihevc_inter_pred_filters_luma_vert_w16inp.s
  149  vld1.16 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  150  vld1.16 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
  152  vld1.16 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
  154  vld1.16 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
  156  vld1.16 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
  158  vld1.16 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  160  vld1.16 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
  162  vld1.16 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
  167  vld1.16 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
  174  vld1.16 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  [all …]
ihevc_inter_pred_luma_vert_w16inp_w16out.s
  159  vld1.16 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  160  vld1.16 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
  162  vld1.16 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
  164  vld1.16 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
  166  vld1.16 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
  168  vld1.16 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  170  vld1.16 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
  172  vld1.16 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
  177  vld1.16 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
  184  vld1.16 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  [all …]
ihevc_inter_pred_filters_luma_vert.s
  160  vld1.u8 {d1},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  161  vld1.u8 {d0},[r0]! @src_tmp1 = vld1_u8(pu1_src_tmp)@
  163  vld1.u8 {d2},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
  165  vld1.u8 {d3},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
  167  vld1.u8 {d4},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
  169  vld1.u8 {d5},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  171  vld1.u8 {d6},[r3],r2 @src_tmp3 = vld1_u8(pu1_src_tmp)@
  173  vld1.u8 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
  175  vld1.u8 {d16},[r3],r2 @src_tmp1 = vld1_u8(pu1_src_tmp)@
  177  vld1.u8 {d17},[r3],r2 @src_tmp2 = vld1_u8(pu1_src_tmp)@
  [all …]
ihevc_inter_pred_chroma_copy.s
  186  vld1.8 {d0},[r0]! @vld1_u8(pu1_src_tmp)
  189  vld1.8 {d1},[r5],r2 @vld1_u8(pu1_src_tmp)
  192  vld1.8 {d2},[r5],r2 @vld1_u8(pu1_src_tmp)
  194  vld1.8 {d3},[r5],r2 @vld1_u8(pu1_src_tmp)
  213  vld1.8 {d0},[r0]! @vld1_u8(pu1_src_tmp)
  216  vld1.8 {d1},[r5],r2 @vld1_u8(pu1_src_tmp)
  233  vld1.8 {q0},[r0]! @vld1_u8(pu1_src_tmp)
  236  vld1.8 {q1},[r5],r2 @vld1_u8(pu1_src_tmp)
  239  vld1.8 {q2},[r5],r2 @vld1_u8(pu1_src_tmp)
  241  vld1.8 {q3},[r5],r2 @vld1_u8(pu1_src_tmp)
  [all …]
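The two copy_w16out kernels above spell out their inner step in the intrinsic comments: an 8-pixel vld1_u8 load followed by vmovl_u8 to widen the pixels to 16 bits before they are stored. Below is a minimal intrinsics sketch of that load-and-widen pattern; the function name, the int16_t destination, and the assumption that width is a multiple of 8 are illustrative, not taken from the listing, and the real kernels' scaling is omitted.

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch of the load-and-widen step the @vld1_u8 / @vmovl_u8 comments refer to.
     * Assumes width is a multiple of 8. */
    static void copy_row_w16out(const uint8_t *src, int16_t *dst, size_t width)
    {
        for (size_t x = 0; x < width; x += 8) {
            uint8x8_t  pix8  = vld1_u8(src + x);  /* vld1_u8(pu1_src_tmp)           */
            uint16x8_t pix16 = vmovl_u8(pix8);    /* vmovl_u8(vld1_u8(pu1_src_tmp)) */
            vst1q_s16(dst + x, vreinterpretq_s16_u16(pix16));
        }
    }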
/external/libvpx/libvpx/vp8/common/arm/neon/
vp8_loopfilter_neon.c
  186  d6 = vld1_u8(u);   in vp8_loop_filter_horizontal_edge_uv_neon()
  188  d7 = vld1_u8(v);   in vp8_loop_filter_horizontal_edge_uv_neon()
  190  d8 = vld1_u8(u);   in vp8_loop_filter_horizontal_edge_uv_neon()
  192  d9 = vld1_u8(v);   in vp8_loop_filter_horizontal_edge_uv_neon()
  194  d10 = vld1_u8(u);  in vp8_loop_filter_horizontal_edge_uv_neon()
  196  d11 = vld1_u8(v);  in vp8_loop_filter_horizontal_edge_uv_neon()
  198  d12 = vld1_u8(u);  in vp8_loop_filter_horizontal_edge_uv_neon()
  200  d13 = vld1_u8(v);  in vp8_loop_filter_horizontal_edge_uv_neon()
  202  d14 = vld1_u8(u);  in vp8_loop_filter_horizontal_edge_uv_neon()
  204  d15 = vld1_u8(v);  in vp8_loop_filter_horizontal_edge_uv_neon()
  [all …]
bilinearpredict_neon.c
  24   return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));  in load_and_shift()
  38   a1 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  42   a3 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  44   a4 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  58   a0 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  60   a1 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  62   a2 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  64   a3 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  66   a4 = vld1_u8(src_ptr);     in vp8_bilinear_predict4x4_neon()
  132  d22u8 = vld1_u8(src_ptr);  in vp8_bilinear_predict8x4_neon()
  [all …]
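The first bilinearpredict_neon.c hit is its load_and_shift() helper, which reads 8 bytes, reinterprets them as a single 64-bit lane, and shifts left by 32 so the four pixels of interest land in the upper half of the vector. Reproduced as a standalone sketch (only the surrounding boilerplate is added), presumably so two 4-pixel rows can share one 8-lane vector in the filter arithmetic:

    #include <arm_neon.h>
    #include <stdint.h>

    /* Load 8 bytes and move the low 4 into the high half of the vector,
     * matching the expression shown at line 24 above. */
    static uint8x8_t load_and_shift(const uint8_t *a)
    {
        return vreinterpret_u8_u64(vshl_n_u64(vreinterpret_u64_u8(vld1_u8(a)), 32));
    }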
mbloopfilter_neon.c
  219  d6 = vld1_u8(u);   in vp8_mbloop_filter_horizontal_edge_uv_neon()
  221  d7 = vld1_u8(v);   in vp8_mbloop_filter_horizontal_edge_uv_neon()
  223  d8 = vld1_u8(u);   in vp8_mbloop_filter_horizontal_edge_uv_neon()
  225  d9 = vld1_u8(v);   in vp8_mbloop_filter_horizontal_edge_uv_neon()
  227  d10 = vld1_u8(u);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
  229  d11 = vld1_u8(v);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
  231  d12 = vld1_u8(u);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
  233  d13 = vld1_u8(v);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
  235  d14 = vld1_u8(u);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
  237  d15 = vld1_u8(v);  in vp8_mbloop_filter_horizontal_edge_uv_neon()
  [all …]
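Both loop-filter files gather chroma the same way: alternating 8-byte loads from the u and v rows into d6..d15. A hedged sketch of that gathering follows, with the two planes paired into one 16-byte vector; the vcombine_u8 step is an illustrative assumption, since the listing only shows the per-plane loads.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch: one 8-pixel row from each chroma plane, paired into a single
     * 16-byte vector so u and v can be filtered together (pairing assumed). */
    static uint8x16_t load_uv_row(const uint8_t *u, const uint8_t *v)
    {
        uint8x8_t du = vld1_u8(u);   /* e.g. d6 = vld1_u8(u) */
        uint8x8_t dv = vld1_u8(v);   /* e.g. d7 = vld1_u8(v) */
        return vcombine_u8(du, dv);
    }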
/external/XNNPACK/src/qu8-dwconv/
up8x9-minmax-neon.c
  80   const uint8x8_t vk0 = vld1_u8(w); w = (void*) ((uintptr_t) w + sizeof(uint8x8_t));  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  81   const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  87   const uint8x8_t vk1 = vld1_u8(w); w = (void*) ((uintptr_t) w + sizeof(uint8x8_t));  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  88   const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  94   const uint8x8_t vk2 = vld1_u8(w); w = (void*) ((uintptr_t) w + sizeof(uint8x8_t));  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  95   const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  101  const uint8x8_t vk3 = vld1_u8(w); w = (void*) ((uintptr_t) w + sizeof(uint8x8_t));  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  102  const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  108  const uint8x8_t vk4 = vld1_u8(w); w = (void*) ((uintptr_t) w + sizeof(uint8x8_t));  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  109  const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;  in xnn_qu8_dwconv_minmax_ukernel_up8x9__neon()
  [all …]
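Every tap of the up8x9 depthwise kernel repeats one idiom: load eight packed weights from w and advance w by sizeof(uint8x8_t) through a uintptr_t cast, then load eight inputs from the matching row pointer and advance it by 8. The sketch below shows that idiom with a plain widening multiply-accumulate standing in for the real arithmetic; the vmlal_u8 accumulation and the absence of zero-point handling are assumptions, and only the loads and pointer updates mirror the listing.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch of one tap of an 8-channel depthwise step (accumulation illustrative). */
    static uint16x8_t dwconv_tap(uint16x8_t vacc, const void **w, const uint8_t **i)
    {
        const uint8x8_t vk = vld1_u8((const uint8_t *) *w);
        *w = (const void *) ((uintptr_t) *w + sizeof(uint8x8_t));  /* advance packed weights */

        const uint8x8_t vi = vld1_u8(*i);
        *i += 8;                                                   /* advance input row */

        return vmlal_u8(vacc, vi, vk);  /* widening multiply-accumulate (assumed) */
    }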
/external/XNNPACK/src/qu8-gavgpool/
7p7x-minmax-neon-c8.c
  44  const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  45  const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  46  const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  47  const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  48  const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  49  const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  50  const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  79  const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  80  const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  81  const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7p7x__neon_c8()
  [all …]
7x-minmax-neon-c8.c
  67   const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  68   const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  69   const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  70   const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  71   const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  72   const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  73   const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  137  const uint8x8_t vi0 = vld1_u8(i0);  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  138  const uint8x8_t vi1 = vld1_u8(i1);  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  139  const uint8x8_t vi2 = vld1_u8(i2);  in xnn_qu8_gavgpool_minmax_ukernel_7x__neon_c8()
  [all …]
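The gavgpool kernels walk seven input row pointers in lockstep, pulling 8 channels from each per iteration. One plausible way those seven loads feed the accumulator is a widening add tree, sketched below; the vaddl_u8/vaddw_u8 reduction is an assumption, since the listing shows only the loads.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch: sum eight channels across seven rows into 16-bit lanes. */
    static uint16x8_t sum7_rows_c8(const uint8_t *i0, const uint8_t *i1,
                                   const uint8_t *i2, const uint8_t *i3,
                                   const uint8_t *i4, const uint8_t *i5,
                                   const uint8_t *i6)
    {
        uint16x8_t vsum = vaddl_u8(vld1_u8(i0), vld1_u8(i1));  /* widen while adding */
        vsum = vaddw_u8(vsum, vld1_u8(i2));
        vsum = vaddw_u8(vsum, vld1_u8(i3));
        vsum = vaddw_u8(vsum, vld1_u8(i4));
        vsum = vaddw_u8(vsum, vld1_u8(i5));
        vsum = vaddw_u8(vsum, vld1_u8(i6));
        return vsum;
    }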
/external/XNNPACK/src/qu8-avgpool/
9p8x-minmax-neon-c8.c
  95   const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  96   const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  97   const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  98   const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  99   const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  100  const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  101  const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  102  const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  103  const uint8x8_t vi8 = vld1_u8(i8); i8 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  167  const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9p8x__neon_c8()
  [all …]
9x-minmax-neon-c8.c
  119  const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  120  const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  121  const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  122  const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  123  const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  124  const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  125  const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  126  const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  127  const uint8x8_t vi8 = vld1_u8(i8); i8 += 8;  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  192  const uint8x8_t vi0 = vld1_u8(i0);  in xnn_qu8_avgpool_minmax_ukernel_9x__neon_c8()
  [all …]
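The avgpool kernels show the same nine-row gathering; the later hits without the i0 += 8 updates belong to the channel tail, where fewer than 8 channels remain but vld1_u8 still reads a full 8 bytes. A common way to then commit only the valid lanes is to spill the vector to a scratch buffer and copy c bytes out; the sketch below shows that tail store, which is an assumption here and not something visible in the listing.

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Sketch: commit only c (< 8) results from a full 8-lane vector. */
    static void store_tail_u8(uint8_t *out, uint8x8_t v, size_t c)
    {
        uint8_t scratch[8];
        vst1_u8(scratch, v);      /* spill all 8 lanes            */
        memcpy(out, scratch, c);  /* keep only the valid channels */
    }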
/external/XNNPACK/src/x8-zip/
x4-neon.c
  28  vxyzw.val[0] = vld1_u8(x); x += 8;  in xnn_x8_zip_x4_ukernel__neon()
  29  vxyzw.val[1] = vld1_u8(y); y += 8;  in xnn_x8_zip_x4_ukernel__neon()
  30  vxyzw.val[2] = vld1_u8(z); z += 8;  in xnn_x8_zip_x4_ukernel__neon()
  31  vxyzw.val[3] = vld1_u8(w); w += 8;  in xnn_x8_zip_x4_ukernel__neon()
  38  vxyzw.val[0] = vld1_u8(x + address_increment);  in xnn_x8_zip_x4_ukernel__neon()
  39  vxyzw.val[1] = vld1_u8(y + address_increment);  in xnn_x8_zip_x4_ukernel__neon()
  40  vxyzw.val[2] = vld1_u8(z + address_increment);  in xnn_x8_zip_x4_ukernel__neon()
  41  vxyzw.val[3] = vld1_u8(w + address_increment);  in xnn_x8_zip_x4_ukernel__neon()
x3-neon.c
  27  vxyz.val[0] = vld1_u8(x); x += 8;  in xnn_x8_zip_x3_ukernel__neon()
  28  vxyz.val[1] = vld1_u8(y); y += 8;  in xnn_x8_zip_x3_ukernel__neon()
  29  vxyz.val[2] = vld1_u8(z); z += 8;  in xnn_x8_zip_x3_ukernel__neon()
  36  vxyz.val[0] = vld1_u8(x + address_increment);  in xnn_x8_zip_x3_ukernel__neon()
  37  vxyz.val[1] = vld1_u8(y + address_increment);  in xnn_x8_zip_x3_ukernel__neon()
  38  vxyz.val[2] = vld1_u8(z + address_increment);  in xnn_x8_zip_x3_ukernel__neon()
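The zip kernels load one 8-byte vector from each input stream into the .val fields of a uint8x8x3_t or uint8x8x4_t (with a shared final iteration that re-reads at x + address_increment to handle the remainder). Writing the gathered vectors back interleaved is then a single structured store; the vst4_u8 step in the sketch below is an assumption, as the listing shows only the loads.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch: zip 8 elements from four streams into interleaved x0 y0 z0 w0 x1 ... output. */
    static void zip_x4_step(const uint8_t *x, const uint8_t *y,
                            const uint8_t *z, const uint8_t *w, uint8_t *o)
    {
        uint8x8x4_t vxyzw;
        vxyzw.val[0] = vld1_u8(x);
        vxyzw.val[1] = vld1_u8(y);
        vxyzw.val[2] = vld1_u8(z);
        vxyzw.val[3] = vld1_u8(w);
        vst4_u8(o, vxyzw);  /* interleaving store (assumed) */
    }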
/external/libvpx/libvpx/vpx_dsp/arm/
avg_neon.c
  34   b = vld1_u8(a);  in vpx_avg_8x8_neon()
  36   c = vld1_u8(a);  in vpx_avg_8x8_neon()
  41   const uint8x8_t d = vld1_u8(a);  in vpx_avg_8x8_neon()
  191  const uint8x16_t a01 = vcombine_u8(vld1_u8(a), vld1_u8(a + a_stride));  in vpx_minmax_8x8_neon()
  193  vcombine_u8(vld1_u8(a + 2 * a_stride), vld1_u8(a + 3 * a_stride));  in vpx_minmax_8x8_neon()
  195  vcombine_u8(vld1_u8(a + 4 * a_stride), vld1_u8(a + 5 * a_stride));  in vpx_minmax_8x8_neon()
  197  vcombine_u8(vld1_u8(a + 6 * a_stride), vld1_u8(a + 7 * a_stride));  in vpx_minmax_8x8_neon()
  199  const uint8x16_t b01 = vcombine_u8(vld1_u8(b), vld1_u8(b + b_stride));  in vpx_minmax_8x8_neon()
  201  vcombine_u8(vld1_u8(b + 2 * b_stride), vld1_u8(b + 3 * b_stride));  in vpx_minmax_8x8_neon()
  203  vcombine_u8(vld1_u8(b + 4 * b_stride), vld1_u8(b + 5 * b_stride));  in vpx_minmax_8x8_neon()
  [all …]
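vpx_minmax_8x8_neon pairs consecutive rows with vcombine_u8 so each comparison covers two rows at once. The sketch below reduces an 8x8 block to its maximum pixel using that pairing; the vmaxq_u8/vpmax_u8 reduction is an assumption, as the hits show only the paired loads.

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch: maximum pixel of an 8x8 block, two rows per 16-byte register. */
    static uint8_t block_max_8x8(const uint8_t *a, int stride)
    {
        uint8x16_t m01 = vcombine_u8(vld1_u8(a),              vld1_u8(a + stride));
        uint8x16_t m23 = vcombine_u8(vld1_u8(a + 2 * stride), vld1_u8(a + 3 * stride));
        uint8x16_t m45 = vcombine_u8(vld1_u8(a + 4 * stride), vld1_u8(a + 5 * stride));
        uint8x16_t m67 = vcombine_u8(vld1_u8(a + 6 * stride), vld1_u8(a + 7 * stride));

        uint8x16_t m = vmaxq_u8(vmaxq_u8(m01, m23), vmaxq_u8(m45, m67));
        uint8x8_t  r = vmax_u8(vget_low_u8(m), vget_high_u8(m));
        r = vpmax_u8(r, r);  /* pairwise max: 8 lanes -> 4 */
        r = vpmax_u8(r, r);  /* 4 -> 2 */
        r = vpmax_u8(r, r);  /* 2 -> 1 */
        return vget_lane_u8(r, 0);
    }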
vpx_convolve8_neon.h
  22  *s0 = vld1_u8(s);  in load_u8_8x4()
  24  *s1 = vld1_u8(s);  in load_u8_8x4()
  26  *s2 = vld1_u8(s);  in load_u8_8x4()
  28  *s3 = vld1_u8(s);  in load_u8_8x4()
  36  *s0 = vld1_u8(s);  in load_u8_8x8()
  38  *s1 = vld1_u8(s);  in load_u8_8x8()
  40  *s2 = vld1_u8(s);  in load_u8_8x8()
  42  *s3 = vld1_u8(s);  in load_u8_8x8()
  44  *s4 = vld1_u8(s);  in load_u8_8x8()
  46  *s5 = vld1_u8(s);  in load_u8_8x8()
  [all …]
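The load_u8_8x4() and load_u8_8x8() helpers this header exposes are plain strided loads: each row is one vld1_u8, and the local pointer then steps by the pitch. A sketch of the 4-row variant as the hits describe it (the helper name and stride parameter below are illustrative, not copied from the header):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch of a 4-row strided load helper in the style of load_u8_8x4(). */
    static void load_u8_8x4_sketch(const uint8_t *s, ptrdiff_t p,
                                   uint8x8_t *s0, uint8x8_t *s1,
                                   uint8x8_t *s2, uint8x8_t *s3)
    {
        *s0 = vld1_u8(s); s += p;
        *s1 = vld1_u8(s); s += p;
        *s2 = vld1_u8(s); s += p;
        *s3 = vld1_u8(s);
    }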
vpx_convolve8_neon.c
  562  d01 = vcombine_u8(vld1_u8(d + 0 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
  563  vld1_u8(d + 1 * dst_stride));  in vpx_convolve8_avg_horiz_neon()
  564  d23 = vcombine_u8(vld1_u8(d + 2 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
  565  vld1_u8(d + 3 * dst_stride));  in vpx_convolve8_avg_horiz_neon()
  566  d45 = vcombine_u8(vld1_u8(d + 4 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
  567  vld1_u8(d + 5 * dst_stride));  in vpx_convolve8_avg_horiz_neon()
  568  d67 = vcombine_u8(vld1_u8(d + 6 * dst_stride),  in vpx_convolve8_avg_horiz_neon()
  569  vld1_u8(d + 7 * dst_stride));  in vpx_convolve8_avg_horiz_neon()
  621  s0 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
  623  s1 = vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));  in vpx_convolve8_vert_neon()
  [all …]
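Two patterns appear in vpx_convolve8_neon.c: the avg_horiz path loads destination rows in vcombine_u8 pairs so the filtered result can be averaged into them, and the vertical path promotes each loaded source row to signed 16-bit before filtering with exactly the chain shown at lines 621/623. The second one, as a standalone sketch (the wrapper function is illustrative):

    #include <arm_neon.h>
    #include <stdint.h>

    /* Sketch: load a row and present its first 4 pixels as int16x4_t for the
     * vertical filter, mirroring
     * vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src)))). */
    static int16x4_t load_row_s16(const uint8_t *src)
    {
        return vreinterpret_s16_u16(vget_low_u16(vmovl_u8(vld1_u8(src))));
    }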
vpx_convolve8_avg_vert_filter_type1_neon.asm
  78   vld1.u8 {d1}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  79   vld1.u8 {d0}, [r0]!       ;src_tmp1 = vld1_u8(pu1_src_tmp);
  81   vld1.u8 {d2}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  84   vld1.u8 {d3}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  87   vld1.u8 {d4}, [r3], r2    ;src_tmp1 = vld1_u8(pu1_src_tmp);
  90   vld1.u8 {d5}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  93   vld1.u8 {d6}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  96   vld1.u8 {d7}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  99   vld1.u8 {d16}, [r3], r2   ;src_tmp1 = vld1_u8(pu1_src_tmp);
  102  vld1.u8 {d17}, [r3], r2   ;src_tmp2 = vld1_u8(pu1_src_tmp);
  [all …]
vpx_convolve8_avg_vert_filter_type2_neon.asm
  79   vld1.u8 {d1}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  80   vld1.u8 {d0}, [r0]!       ;src_tmp1 = vld1_u8(pu1_src_tmp);
  82   vld1.u8 {d2}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  85   vld1.u8 {d3}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  88   vld1.u8 {d4}, [r3], r2    ;src_tmp1 = vld1_u8(pu1_src_tmp);
  91   vld1.u8 {d5}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  94   vld1.u8 {d6}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  97   vld1.u8 {d7}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  100  vld1.u8 {d16}, [r3], r2   ;src_tmp1 = vld1_u8(pu1_src_tmp);
  103  vld1.u8 {d17}, [r3], r2   ;src_tmp2 = vld1_u8(pu1_src_tmp);
  [all …]
vpx_convolve8_vert_filter_type1_neon.asm
  79   vld1.u8 {d1}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  80   vld1.u8 {d0}, [r0]!       ;src_tmp1 = vld1_u8(pu1_src_tmp);
  82   vld1.u8 {d2}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  85   vld1.u8 {d3}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  88   vld1.u8 {d4}, [r3], r2    ;src_tmp1 = vld1_u8(pu1_src_tmp);
  91   vld1.u8 {d5}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  94   vld1.u8 {d6}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  97   vld1.u8 {d7}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  100  vld1.u8 {d16}, [r3], r2   ;src_tmp1 = vld1_u8(pu1_src_tmp);
  103  vld1.u8 {d17}, [r3], r2   ;src_tmp2 = vld1_u8(pu1_src_tmp);
  [all …]
vpx_convolve8_vert_filter_type2_neon.asm
  79   vld1.u8 {d1}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  80   vld1.u8 {d0}, [r0]!       ;src_tmp1 = vld1_u8(pu1_src_tmp);
  82   vld1.u8 {d2}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  85   vld1.u8 {d3}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  88   vld1.u8 {d4}, [r3], r2    ;src_tmp1 = vld1_u8(pu1_src_tmp);
  91   vld1.u8 {d5}, [r3], r2    ;src_tmp2 = vld1_u8(pu1_src_tmp);
  94   vld1.u8 {d6}, [r3], r2    ;src_tmp3 = vld1_u8(pu1_src_tmp);
  97   vld1.u8 {d7}, [r3], r2    ;src_tmp4 = vld1_u8(pu1_src_tmp);
  100  vld1.u8 {d16}, [r3], r2   ;src_tmp1 = vld1_u8(pu1_src_tmp);
  103  vld1.u8 {d17}, [r3], r2   ;src_tmp2 = vld1_u8(pu1_src_tmp);
  [all …]
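All four assembly vertical filters prime their 8-tap window the same way the comments describe: the first row comes from the base pointer with post-increment (d0), and the remaining rows come from a second pointer that steps by the source stride (d1..d7, then d16/d17 as the window slides down). Below is a hedged intrinsics rendering of that priming step; the names are illustrative and only the loads correspond to the listing.

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch: fill an 8-tap vertical filter window with the rows at src,
     * src + stride, ..., src + 7*stride; after this, each additional output
     * row needs only one new vld1_u8. */
    static void prime_vert_window(const uint8_t *src, ptrdiff_t stride, uint8x8_t rows[8])
    {
        for (int t = 0; t < 8; ++t) {
            rows[t] = vld1_u8(src);  /* src_tmpN = vld1_u8(pu1_src_tmp) */
            src += stride;
        }
    }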
/external/libvpx/config/arm-neon/vpx_dsp/arm/
vpx_convolve8_avg_vert_filter_type2_neon.asm.S
  85   vld1.u8 {d1}, [r3], r2    @src_tmp2 = vld1_u8(pu1_src_tmp);
  86   vld1.u8 {d0}, [r0]!       @src_tmp1 = vld1_u8(pu1_src_tmp);
  88   vld1.u8 {d2}, [r3], r2    @src_tmp3 = vld1_u8(pu1_src_tmp);
  91   vld1.u8 {d3}, [r3], r2    @src_tmp4 = vld1_u8(pu1_src_tmp);
  94   vld1.u8 {d4}, [r3], r2    @src_tmp1 = vld1_u8(pu1_src_tmp);
  97   vld1.u8 {d5}, [r3], r2    @src_tmp2 = vld1_u8(pu1_src_tmp);
  100  vld1.u8 {d6}, [r3], r2    @src_tmp3 = vld1_u8(pu1_src_tmp);
  103  vld1.u8 {d7}, [r3], r2    @src_tmp4 = vld1_u8(pu1_src_tmp);
  106  vld1.u8 {d16}, [r3], r2   @src_tmp1 = vld1_u8(pu1_src_tmp);
  109  vld1.u8 {d17}, [r3], r2   @src_tmp2 = vld1_u8(pu1_src_tmp);
  [all …]
vpx_convolve8_avg_vert_filter_type1_neon.asm.S
  84   vld1.u8 {d1}, [r3], r2    @src_tmp2 = vld1_u8(pu1_src_tmp);
  85   vld1.u8 {d0}, [r0]!       @src_tmp1 = vld1_u8(pu1_src_tmp);
  87   vld1.u8 {d2}, [r3], r2    @src_tmp3 = vld1_u8(pu1_src_tmp);
  90   vld1.u8 {d3}, [r3], r2    @src_tmp4 = vld1_u8(pu1_src_tmp);
  93   vld1.u8 {d4}, [r3], r2    @src_tmp1 = vld1_u8(pu1_src_tmp);
  96   vld1.u8 {d5}, [r3], r2    @src_tmp2 = vld1_u8(pu1_src_tmp);
  99   vld1.u8 {d6}, [r3], r2    @src_tmp3 = vld1_u8(pu1_src_tmp);
  102  vld1.u8 {d7}, [r3], r2    @src_tmp4 = vld1_u8(pu1_src_tmp);
  105  vld1.u8 {d16}, [r3], r2   @src_tmp1 = vld1_u8(pu1_src_tmp);
  108  vld1.u8 {d17}, [r3], r2   @src_tmp2 = vld1_u8(pu1_src_tmp);
  [all …]