
Searched refs:vsub (Results 1 – 25 of 140) sorted by relevance


/external/llvm/test/MC/ARM/
neon-sub-encoding.s
3 vsub.i8 d16, d17, d16
4 vsub.i16 d16, d17, d16
5 vsub.i32 d16, d17, d16
6 vsub.i64 d16, d17, d16
7 vsub.f32 d16, d16, d17
8 vsub.i8 q8, q8, q9
9 vsub.i16 q8, q8, q9
10 vsub.i32 q8, q8, q9
11 vsub.i64 q8, q8, q9
12 vsub.f32 q8, q8, q9
[all …]
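
These assembler tests round-trip the NEON vsub forms: element-wise subtraction on 64-bit d registers and 128-bit q registers, over 8/16/32/64-bit integer lanes and f32. A minimal sketch of the standard arm_neon.h intrinsics that compile down to these encodings (the intrinsic spellings are the usual NEON ones, not part of the test file):

    #include <arm_neon.h>

    // Each wrapper compiles to one of the vsub encodings exercised above.
    int8x8_t    sub_i8_d (int8x8_t a, int8x8_t b)       { return vsub_s8(a, b); }   // vsub.i8  dN, dN, dN
    int16x8_t   sub_i16_q(int16x8_t a, int16x8_t b)     { return vsubq_s16(a, b); } // vsub.i16 qN, qN, qN
    float32x2_t sub_f32_d(float32x2_t a, float32x2_t b) { return vsub_f32(a, b); }  // vsub.f32 dN, dN, dN
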
/external/capstone/suite/MC/ARM/
neon-sub-encoding.s.cs
2 0xa0,0x08,0x41,0xf3 = vsub.i8 d16, d17, d16
3 0xa0,0x08,0x51,0xf3 = vsub.i16 d16, d17, d16
4 0xa0,0x08,0x61,0xf3 = vsub.i32 d16, d17, d16
5 0xa0,0x08,0x71,0xf3 = vsub.i64 d16, d17, d16
6 0xa1,0x0d,0x60,0xf2 = vsub.f32 d16, d16, d17
7 0xe2,0x08,0x40,0xf3 = vsub.i8 q8, q8, q9
8 0xe2,0x08,0x50,0xf3 = vsub.i16 q8, q8, q9
9 0xe2,0x08,0x60,0xf3 = vsub.i32 q8, q8, q9
10 0xe2,0x08,0x70,0xf3 = vsub.i64 q8, q8, q9
11 0xe2,0x0d,0x60,0xf2 = vsub.f32 q8, q8, q9
[all …]
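
This suite pairs each machine-code byte sequence with the mnemonic Capstone must produce for it. A minimal round-trip sketch using Capstone's public C API (cs_open, cs_disasm, cs_free, cs_close; the main() harness itself is illustrative):

    #include <capstone/capstone.h>
    #include <cstdio>

    // Decode the first byte sequence above; expected output:
    // "vsub.i8 d16, d17, d16".
    int main() {
        const uint8_t code[] = {0xa0, 0x08, 0x41, 0xf3};
        csh handle;
        cs_insn *insn;
        if (cs_open(CS_ARCH_ARM, CS_MODE_ARM, &handle) != CS_ERR_OK)
            return 1;
        size_t count = cs_disasm(handle, code, sizeof(code), 0, 0, &insn); // count 0 = decode all
        for (size_t i = 0; i < count; i++)
            std::printf("%s %s\n", insn[i].mnemonic, insn[i].op_str);
        cs_free(insn, count);
        cs_close(&handle);
        return 0;
    }
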
/external/ComputeLibrary/src/core/NEON/wrapper/intrinsics/
sub.h
34 inline vtype vsub(const vtype &a, const vtype &b) \
39 VSUB_IMPL(uint8x8_t, uint8x8_t, vsub, u8)
40 VSUB_IMPL(int8x8_t, int8x8_t, vsub, s8)
41 VSUB_IMPL(uint16x4_t, uint16x4_t, vsub, u16)
42 VSUB_IMPL(int16x4_t, int16x4_t, vsub, s16)
43 VSUB_IMPL(uint32x2_t, uint32x2_t, vsub, u32)
44 VSUB_IMPL(int32x2_t, int32x2_t, vsub, s32)
45 VSUB_IMPL(uint64x1_t, uint64x1_t, vsub, u64)
46 VSUB_IMPL(int64x1_t, int64x1_t, vsub, s64)
47 VSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)
[all …]
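
The wrapper header stamps out one vsub overload per NEON vector type, so templated kernel code can subtract any lane type through a single name. A sketch of the macro pattern as read from the hits above (the exact expansion in the real header may differ):

    #include <arm_neon.h>

    // prefix##_##postfix pastes e.g. "vsub" + "u8" into the intrinsic vsub_u8.
    #define VSUB_IMPL(stype, vtype, prefix, postfix)      \
        inline vtype vsub(const vtype &a, const vtype &b) \
        {                                                 \
            return prefix##_##postfix(a, b);              \
        }

    VSUB_IMPL(uint8x8_t, uint8x8_t, vsub, u8)       // -> vsub_u8(a, b)
    VSUB_IMPL(float32x2_t, float32x2_t, vsub, f32)  // -> vsub_f32(a, b)
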
/external/libhevc/common/arm/
ihevc_itrans_recon_8x8.s
249 vsub.s32 q10,q10,q11 @// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
257 vsub.s32 q5,q5,q3 @// a3 = c0 - d0(part of r3,r4)
258 vsub.s32 q11,q10,q9 @// a2 = c1 - d1(part of r2,r5)
262 vsub.s32 q3,q7,q12 @// a0 - b0(part of r7)
265 vsub.s32 q11,q11,q14 @// a2 - b2(part of r5)
268 vsub.s32 q9,q9,q13 @// a1 - b1(part of r6)
271 vsub.s32 q15,q5,q15 @// a3 - b3(part of r4)
325 vsub.s32 q5,q10,q3 @// a3 = c0 - d0(part of r3,r4)
326 vsub.s32 q11,q10,q9 @// a2 = c1 - d1(part of r2,r5)
330 vsub.s32 q3,q7,q12 @// a0 - b0(part of r7)
[all …]
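
Every vsub.s32 here is the "minus" half of an add/sub butterfly on partial sums of the 8x8 inverse transform. A scalar sketch of the even-part stage, following the comments above (variable names come from those comments, not from the file's actual symbols):

    #include <cstdint>

    // c0/c1 are even-part sums, d0/d1 odd-part sums; a0..a3 feed rows 0-7.
    void idct_even_butterfly(int32_t c0, int32_t c1, int32_t d0, int32_t d1,
                             int32_t &a0, int32_t &a1, int32_t &a2, int32_t &a3) {
        a0 = c0 + d0;  // part of rows 0 and 7
        a3 = c0 - d0;  // part of rows 3 and 4  (vsub.s32 q5, q5, q3)
        a1 = c1 + d1;  // part of rows 1 and 6
        a2 = c1 - d1;  // part of rows 2 and 5  (vsub.s32 q11, q10, q9)
    }
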
ihevc_intra_pred_luma_mode_3_to_9.s
201 vsub.s8 d8, d8, d2 @ref_main_idx (sub row)
202 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
204 vsub.s8 d9, d8, d2 @ref_main_idx + 1 (row 0)
206 vsub.s8 d7, d28, d6 @32-fract
209 vsub.s8 d4, d8, d2 @ref_main_idx (row 1)
210 vsub.s8 d5, d9, d2 @ref_main_idx + 1 (row 1)
217 vsub.s8 d8, d8, d3 @ref_main_idx (row 2)
218 vsub.s8 d9, d9, d3 @ref_main_idx + 1 (row 2)
227 vsub.s8 d4, d4, d3 @ref_main_idx (row 3)
228 vsub.s8 d5, d5, d3 @ref_main_idx + 1 (row 3)
[all …]
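
The vsub.s8 lines step the reference index per row and form the 32-fract weight of HEVC's two-tap angular interpolation; the same scheme recurs in the chroma and mode 19-to-33 files below. A scalar sketch of the filter being vectorized (function name illustrative):

    #include <cstdint>

    // Two-tap interpolation between neighbouring reference samples;
    // fract is the sub-pel position in 1/32 units.
    uint8_t angular_sample(const uint8_t *ref, int idx, int fract) {
        return uint8_t(((32 - fract) * ref[idx] + fract * ref[idx + 1] + 16) >> 5);
    }
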
ihevc_intra_pred_chroma_mode_3_to_9.s
197 vsub.s8 d8, d8, d27 @ref_main_idx (sub row)
198 vsub.s8 d8, d26, d8 @ref_main_idx (row 0)
200 vsub.s8 d9, d8, d29 @ref_main_idx + 1 (row 0)
202 vsub.s8 d7, d28, d6 @32-fract
205 vsub.s8 d4, d8, d29 @ref_main_idx (row 1)
206 vsub.s8 d5, d9, d29 @ref_main_idx + 1 (row 1)
215 vsub.s8 d8, d8, d29 @ref_main_idx (row 2)
216 vsub.s8 d9, d9, d29 @ref_main_idx + 1 (row 2)
225 vsub.s8 d4, d4, d29 @ref_main_idx (row 3)
226 vsub.s8 d5, d5, d29 @ref_main_idx + 1 (row 3)
[all …]
ihevc_intra_pred_luma_planar.s
187 vsub.s8 d9, d2, d8 @(1-8)[nt-1-col]
204 vsub.s8 d6, d6, d7 @(1)
218 vsub.s8 d6, d6, d7 @(2)
235 vsub.s8 d6, d6, d7 @(3)
252 vsub.s8 d6, d6, d7 @(4)
268 vsub.s8 d6, d6, d7 @(5)
285 vsub.s8 d6, d6, d7 @(6)
302 vsub.s8 d6, d6, d7 @(7)
339 vsub.s8 d9, d2, d8 @(1n)(1-8)[nt-1-col]
342 vsub.s8 d6, d2, d5
[all …]
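
Here the subtractions maintain the (nt-1-col) and (nt-1-row) weights of HEVC planar prediction, which blends the top and left reference rows. A scalar sketch of the per-sample formula (array and parameter names are illustrative; top[nt] and left[nt] stand for the top-right and bottom-left references):

    #include <cstdint>

    uint8_t planar_sample(const uint8_t *top, const uint8_t *left,
                          int nt, int log2nt, int x, int y) {
        int p = (nt - 1 - x) * left[y] + (x + 1) * top[nt]
              + (nt - 1 - y) * top[x]  + (y + 1) * left[nt] + nt;
        return uint8_t(p >> (log2nt + 1));
    }
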
ihevc_intra_pred_filters_chroma_mode_19_to_25.s
284 vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
294 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
308 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
327 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
342 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
358 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
374 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
405 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
422 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
439 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
[all …]
ihevc_intra_pred_chroma_mode_27_to_33.s
175 vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
186 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
200 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
219 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
236 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
253 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
266 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
296 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
313 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
328 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
[all …]
ihevc_intra_pred_luma_mode_27_to_33.s
177 vsub.u8 d30,d1,d31 @32-fract(dup_const_32_fract)
188 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
202 vsub.u8 d26,d1,d27 @(iii)32-fract(dup_const_32_fract)
220 vsub.u8 d24,d1,d25 @(iv)32-fract(dup_const_32_fract)
237 vsub.u8 d30,d1,d31 @(v)32-fract(dup_const_32_fract)
254 vsub.u8 d28,d1,d29 @(vi)32-fract(dup_const_32_fract)
267 vsub.u8 d26,d1,d27 @(vii)32-fract(dup_const_32_fract)
296 vsub.u8 d24,d1,d25 @(viii)32-fract(dup_const_32_fract)
313 vsub.u8 d30,d1,d31 @(i)32-fract(dup_const_32_fract)
328 vsub.u8 d28,d1,d29 @(ii)32-fract(dup_const_32_fract)
[all …]
ihevc_intra_pred_chroma_planar.s
174 vsub.s8 d30, d2, d8 @[nt-1-col]
175 vsub.s8 d31, d2, d9
200 vsub.s8 d19, d6, d7 @[nt-1-row]--
220 vsub.s8 d6, d19, d7 @[nt-1-row]--
242 vsub.s8 d19, d6, d7 @[nt-1-row]--
267 vsub.s8 d6, d19, d7 @[nt-1-row]--
322 vsub.s8 d30, d2, d8 @[nt-1-col]
323 vsub.s8 d31, d2, d9
339 vsub.s8 d9, d2, d8 @[nt-1-col]
353 vsub.s8 d6, d6, d7 @[nt-1-row]--
/external/llvm/test/MC/Hexagon/
v60-alu.s
29 #CHECK: 1cb4cabe { v31:30.h = vsub(v10.ub,{{ *}}v20.ub) }
30 v31:30.h=vsub(v10.ub,v20.ub)
32 #CHECK: 1cb8cada { v27:26.w = vsub(v10.uh,{{ *}}v24.uh) }
33 v27:26.w=vsub(v10.uh,v24.uh)
35 #CHECK: 1cbcdbe8 { v9:8.w = vsub(v27.h,{{ *}}v28.h) }
36 v9:8.w=vsub(v27.h,v28.h)
38 #CHECK: 1caeca00 { v1:0.h = vsub(v11:10.h,{{ *}}v15:14.h):sat }
39 v1:0.h=vsub(v11:10.h,v15:14.h):sat
41 #CHECK: 1ca8c43e { v31:30.w = vsub(v5:4.w,{{ *}}v9:8.w):sat }
42 v31:30.w=vsub(v5:4.w,v9:8.w):sat
[all …]
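
Unlike the NEON forms above, these HVX variants change lane width: vsub on unsigned-byte inputs writes signed halfword lanes of a register pair, so no difference can wrap, and the :sat forms clamp instead. A scalar sketch of the two lane-level behaviours (function names illustrative):

    #include <cstdint>

    // Widening form: v31:30.h = vsub(v10.ub, v20.ub), per lane.
    int16_t vsub_widen_ub(uint8_t a, uint8_t b) {
        return int16_t(a) - int16_t(b);          // range [-255, 255] always fits
    }

    // Saturating form: vsub(...h, ...h):sat, per halfword lane.
    int16_t vsub_sat_h(int16_t a, int16_t b) {
        int32_t r = int32_t(a) - int32_t(b);
        if (r > INT16_MAX) r = INT16_MAX;
        if (r < INT16_MIN) r = INT16_MIN;
        return int16_t(r);
    }
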
/external/libmpeg2/common/arm/
impeg2_idct.s
504 vsub.s32 q10, q10, q11 @// c1 = y0 * cos4 - y4 * cos4(part of a0 and a1)
512 vsub.s32 q5, q5, q3 @// a3 = c0 - d0(part of r3,r4)
513 vsub.s32 q11, q10, q9 @// a2 = c1 - d1(part of r2,r5)
517 vsub.s32 q3, q7, q12 @// a0 - b0(part of r7)
520 vsub.s32 q11, q11, q14 @// a2 - b2(part of r5)
523 vsub.s32 q9, q9, q13 @// a1 - b1(part of r6)
526 vsub.s32 q15, q5, q15 @// a3 - b3(part of r4)
584 vsub.s32 q5, q10, q3 @// a3 = c0 - d0(part of r3,r4)
585 vsub.s32 q11, q10, q9 @// a2 = c1 - d1(part of r2,r5)
589 vsub.s32 q3, q7, q12 @// a0 - b0(part of r7)
[all …]
/external/llvm/test/CodeGen/ARM/
fnmscs.ll
29 ; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
33 ; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
50 ; A8U: vsub.f32 d{{[0-9]}}, d{{[0-9]}}, d{{[0-9]}}
54 ; A8: vsub.f32 s{{[0-9]}}, s{{[0-9]}}, s{{[0-9]}}
71 ; A8U: vsub.f64 d
75 ; A8: vsub.f64 d
92 ; A8U: vsub.f64 d
96 ; A8: vsub.f64 d
fsubs.ll
22 ; VFP2: vsub.f32 s
23 ; NFP1U: vsub.f32 d
24 ; NFP1: vsub.f32 s
25 ; NFP0: vsub.f32 s
cttz_vector.ll
55 ; CHECK: vsub.i8 [[D1]], [[D1]], [[D2]]
70 ; CHECK: vsub.i8 [[Q1]], [[Q1]], [[Q2]]
101 ; CHECK: vsub.i16 [[D1]], [[D1]], [[D2]]
117 ; CHECK: vsub.i16 [[Q1]], [[Q1]], [[Q2]]
141 ; CHECK: vsub.i32 [[D1]], [[D1]], [[D2]]
158 ; CHECK: vsub.i32 [[Q1]], [[Q1]], [[Q2]]
174 ; CHECK: vsub.i64 [[D2]], [[D2]], [[D1]]
193 ; CHECK: vsub.i64 [[Q2]], [[Q2]], [[Q1:q[0-9]+]]
239 ; CHECK: vsub.i8 [[D1]], [[D1]], [[D2]]
254 ; CHECK: vsub.i8 [[Q1]], [[Q1]], [[Q2]]
[all …]
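
In this lowering the vsub produces x - 1 for the classic branch-free count-trailing-zeros identity cttz(x) = popcount(~x & (x - 1)). A scalar sketch of one 8-bit lane (C++20 for std::popcount; this is a reading of the test's strategy, not a quote of the generated code):

    #include <bit>
    #include <cstdint>

    // For x == 0 this returns 8, matching cttz semantics for an 8-bit lane.
    uint8_t cttz8(uint8_t x) {
        return uint8_t(std::popcount(uint8_t(~x & uint8_t(x - 1u))));
    }
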
/external/cronet/third_party/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S
138 vsub.i64 q4,q4,q12
140 vsub.i64 q10,q10,q13
146 vsub.i64 q5,q5,q12
153 vsub.i64 q11,q11,q13
155 vsub.i64 q6,q6,q12
164 vsub.i64 q2,q2,q13
166 vsub.i64 q7,q7,q12
177 vsub.i64 q7,q8,q12
186 vsub.i64 q0,q9,q0
188 vsub.i64 q3,q3,q10
[all …]
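
These vsub.i64 lines appear to be the subtract half of ref10-style carry propagation in radix-2^25.5 field arithmetic: each carry is added into the next limb and its shifted value subtracted from the current one, two 64-bit lanes per q register. The same file recurs below in the other vendored copies of this code. A scalar sketch of one carry step (limb names illustrative):

    #include <cstdint>

    void carry_from_h0_to_h1(int64_t &h0, int64_t &h1) {
        int64_t carry = (h0 + (int64_t(1) << 25)) >> 26; // round-to-nearest carry
        h1 += carry;
        h0 -= carry << 26;   // the vsub.i64 half of the step
    }
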
/external/rust/crates/ring/crypto/curve25519/asm/
x25519-asm-arm.S
136 vsub.i64 q4,q4,q12
138 vsub.i64 q10,q10,q13
144 vsub.i64 q5,q5,q12
151 vsub.i64 q11,q11,q13
153 vsub.i64 q6,q6,q12
162 vsub.i64 q2,q2,q13
164 vsub.i64 q7,q7,q12
175 vsub.i64 q7,q8,q12
184 vsub.i64 q0,q9,q0
186 vsub.i64 q3,q3,q10
[all …]
/external/rust/crates/quiche/deps/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S
138 vsub.i64 q4,q4,q12
140 vsub.i64 q10,q10,q13
146 vsub.i64 q5,q5,q12
153 vsub.i64 q11,q11,q13
155 vsub.i64 q6,q6,q12
164 vsub.i64 q2,q2,q13
166 vsub.i64 q7,q7,q12
177 vsub.i64 q7,q8,q12
186 vsub.i64 q0,q9,q0
188 vsub.i64 q3,q3,q10
[all …]
/external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S
138 vsub.i64 q4,q4,q12
140 vsub.i64 q10,q10,q13
146 vsub.i64 q5,q5,q12
153 vsub.i64 q11,q11,q13
155 vsub.i64 q6,q6,q12
164 vsub.i64 q2,q2,q13
166 vsub.i64 q7,q7,q12
177 vsub.i64 q7,q8,q12
186 vsub.i64 q0,q9,q0
188 vsub.i64 q3,q3,q10
[all …]
/external/ComputeLibrary/src/cpu/kernels/scale/neon/
qasymm8_signed.cpp
206 …const auto in00_0 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgetl… in qasymm8_signed_neon_scale_bilinear()
207 …const auto in00_1 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgeth… in qasymm8_signed_neon_scale_bilinear()
208 …const auto in00_2 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgetl… in qasymm8_signed_neon_scale_bilinear()
209 …const auto in00_3 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgeth… in qasymm8_signed_neon_scale_bilinear()
214 …const auto in01_0 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgetl… in qasymm8_signed_neon_scale_bilinear()
215 …const auto in01_1 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgeth… in qasymm8_signed_neon_scale_bilinear()
216 …const auto in01_2 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgetl… in qasymm8_signed_neon_scale_bilinear()
217 …const auto in01_3 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgeth… in qasymm8_signed_neon_scale_bilinear()
222 …const auto in10_0 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgetl… in qasymm8_signed_neon_scale_bilinear()
223 …const auto in10_1 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vmovl(wrapper::vgeth… in qasymm8_signed_neon_scale_bilinear()
[all …]
qasymm8.cpp
207 …const auto in00_0 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
208 …const auto in00_1 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
209 …const auto in00_2 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
210 …const auto in00_3 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
215 …const auto in01_0 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
216 …const auto in01_1 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
217 …const auto in01_2 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
218 …const auto in01_3 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
223 …const auto in10_0 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
224 …const auto in10_1 = wrapper::vmul(wrapper::vcvt<float>(wrapper::vsub(wrapper::vreinterpret(wrapper… in qasymm8_neon_scale_bilinear()
[all …]
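
In both the signed and unsigned bilinear-scale kernels, wrapper::vsub supplies the "subtract the quantization offset" step of dequantization: float = (widened(q) - offset) * scale, spelled vmovl -> vsub -> vcvt -> vmul. A sketch of that chain with raw NEON intrinsics (function and parameter names are illustrative, not the kernel's own):

    #include <arm_neon.h>

    float32x4_t dequantize4(int16x4_t q, int32x4_t offset, float32x4_t scale) {
        int32x4_t centered = vsubq_s32(vmovl_s16(q), offset); // the wrapper::vsub
        return vmulq_f32(vcvtq_f32_s32(centered), scale);     // vcvt + vmul
    }
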
/external/libavc/common/arm/
ih264_resi_trans_quant_a9.s
145 vsub.s16 d10, d2, d4 @x2 = x5-x6
146 vsub.s16 d11, d0, d6 @x3 = x4-x7
152 vsub.s16 d16, d8, d9 @x6 = x0 - x1;
154 vsub.s16 d17, d11, d12 @x7 = x3 - U_SHIFT(x2,1,shft);
166 vsub.s16 d20, d15, d16 @x2 = x5-x6
167 vsub.s16 d21, d14, d17 @x3 = x4-x7
175 vsub.s16 d26, d18, d19 @x7 = x0 - x1;
177 vsub.s16 d27, d21, d22 @x8 = x3 - U_SHIFT(x2,1,shft);
234 vsub.u8 d26, d25, d24 @I invert current nnz
340 vsub.s16 d10, d2, d4 @x2 = x5-x6
[all …]
ih264_iquant_itrans_recon_a9.s
170 vsub.s16 d5, d0, d2 @x1 = q0 - q1;
175 vsub.s16 d6, d8, d3 @x2 = (q0 >> 1) - q1;
181 vsub.s16 q6, q2, q3 @x0-x3 and x1-x2 combined
195 vsub.s16 d15, d10, d12 @x1 = q0 - q1;
200 vsub.s16 d16, d18, d13 @x2 = (q0 >> 1) - q1;
206 vsub.s16 q11, q7, q8 @x0-x3 and x1-x2 combined
344 vsub.s16 d5, d0, d2 @x1 = q0 - q1;
349 vsub.s16 d6, d8, d3 @x2 = (q0 >> 1) - q1;
355 vsub.s16 q6, q2, q3 @x0-x3 and x1-x2 combined
370 vsub.s16 d15, d10, d12 @x1 = q0 - q1;
[all …]
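
Both H.264 kernels build the 4x4 core transform from add/sub pairs in which one operand is halved, which is where these vsub.s16 lines come from (x1 = q0 - q1, x2 = (q0 >> 1) - q1, plus the combined q-register subtractions). A scalar sketch of the standard inverse core transform (variable names illustrative):

    #include <cstdint>

    void itrans4_stage(int16_t w0, int16_t w1, int16_t w2, int16_t w3,
                       int16_t &x0, int16_t &x1, int16_t &x2, int16_t &x3) {
        int16_t z0 = int16_t(w0 + w2);
        int16_t z1 = int16_t(w0 - w2);          // x1 = q0 - q1
        int16_t z2 = int16_t((w1 >> 1) - w3);   // x2 = (q0 >> 1) - q1
        int16_t z3 = int16_t(w1 + (w3 >> 1));
        x0 = int16_t(z0 + z3);  x1 = int16_t(z1 + z2);
        x2 = int16_t(z1 - z2);  x3 = int16_t(z0 - z3); // combined add/sub pairs
    }
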
/external/igt-gpu-tools/lib/
igt_fb.c
102 uint8_t vsub; member
110 .hsub = 1, .vsub = 1,
118 .hsub = 1, .vsub = 1,
126 .hsub = 1, .vsub = 1,
134 .hsub = 1, .vsub = 1,
142 .hsub = 1, .vsub = 1,
150 .hsub = 1, .vsub = 1,
158 .hsub = 1, .vsub = 1,
165 .hsub = 1, .vsub = 1,
173 .hsub = 1, .vsub = 1,
[all …]
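
Here vsub is not an instruction at all: it is the vertical chroma-subsampling factor in the framebuffer format table, the counterpart of hsub. A sketch of how such factors determine plane geometry (the struct and helper are illustrative, not igt's API; e.g. NV12 would use hsub = 2, vsub = 2):

    // A format's chroma plane is fb_height / vsub rows tall, rounded up.
    struct format_desc { int hsub, vsub; };

    int chroma_plane_height(const format_desc &f, int fb_height) {
        return (fb_height + f.vsub - 1) / f.vsub;
    }
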
