/external/libaom/aom_dsp/arm/

D | sse_neon.c
  326  static INLINE uint32_t highbd_sse_W8x1_neon(uint16x8_t q2, uint16x8_t q3) {   in highbd_sse_W8x1_neon() argument
  331  uint16x8_t q4 = vabdq_u16(q2, q3); // diff = abs(a[x] - b[x])   in highbd_sse_W8x1_neon()
  359  uint16x8_t q2, q3, q4, q5;   in aom_highbd_sse_neon() local
  373  q2 = vcombine_u16(d0, d1); // make a 8 data vector   in aom_highbd_sse_neon()
  376  sse += highbd_sse_W8x1_neon(q2, q3);   in aom_highbd_sse_neon()
  381  q2 = vld1q_u16(a);   in aom_highbd_sse_neon()
  384  sse += highbd_sse_W8x1_neon(q2, q3);   in aom_highbd_sse_neon()
  392  q2 = vld1q_u16(a);   in aom_highbd_sse_neon()
  395  sse += highbd_sse_W8x1_neon(q2, q3);   in aom_highbd_sse_neon()
  397  q2 = vld1q_u16(a + 8);   in aom_highbd_sse_neon()
  [all …]
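As context for the matches above: the helper sums squared differences across one row of eight 16-bit (high-bitdepth) pixels, and the caller accumulates it into sse. A minimal scalar sketch of the same computation, with a function name of our own (the NEON version keeps the running sums in vector lanes):

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar model of the matched NEON helper: sum of squared absolute
       differences over eight uint16_t pixels. */
    static uint32_t highbd_sse_w8x1_ref(const uint16_t *a, const uint16_t *b) {
      uint32_t sse = 0;
      for (int x = 0; x < 8; ++x) {
        uint32_t diff = (uint32_t)abs((int)a[x] - (int)b[x]); /* diff = abs(a[x] - b[x]) */
        sse += diff * diff;
      }
      return sse;
    }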
/external/llvm/test/MC/ARM/

D | vorr-vbic-illegal-cases.s
  5   vorr.i32 q2, #0xffffffff
  7   vorr.i32 q2, #0xabababab
  8   vorr.i16 q2, #0xabab
  9   vorr.i16 q2, #0xabab
  14  @ CHECK: vorr.i32 q2, #0xffffffff
  18  @ CHECK: vorr.i32 q2, #0xabababab
  20  @ CHECK: vorr.i16 q2, #0xabab
  22  @ CHECK: vorr.i16 q2, #0xabab
  25  vbic.i32 q2, #0xffffffff
  27  vbic.i32 q2, #0xabababab
  [all …]
D | vmov-vmvn-byte-replicate.s
  6   @ CHECK: vmov.i8 q2, #0xff @ encoding: [0x5f,0x4e,0x87,0xf3]
  8   @ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
  9   @ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
  10  @ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
  13  @ CHECK: vmov.i8 q2, #0x0 @ encoding: [0x50,0x4e,0x80,0xf2]
  15  @ CHECK: vmov.i8 q2, #0x54 @ encoding: [0x54,0x4e,0x85,0xf2]
  17  @ CHECK: vmov.i8 q2, #0x54 @ encoding: [0x54,0x4e,0x85,0xf2]
  20  vmov.i32 q2, #0xffffffff
  22  vmov.i32 q2, #0xabababab
  23  vmov.i16 q2, #0xabab
  [all …]
D | fullfp16-neon.s
  5   vadd.f16 q0, q1, q2
  7   @ ARM: vadd.f16 q0, q1, q2 @ encoding: [0x44,0x0d,0x12,0xf2]
  9   @ THUMB: vadd.f16 q0, q1, q2 @ encoding: [0x12,0xef,0x44,0x0d]
  12  vsub.f16 q0, q1, q2
  14  @ ARM: vsub.f16 q0, q1, q2 @ encoding: [0x44,0x0d,0x32,0xf2]
  16  @ THUMB: vsub.f16 q0, q1, q2 @ encoding: [0x32,0xef,0x44,0x0d]
  19  vmul.f16 q0, q1, q2
  21  @ ARM: vmul.f16 q0, q1, q2 @ encoding: [0x54,0x0d,0x12,0xf3]
  23  @ THUMB: vmul.f16 q0, q1, q2 @ encoding: [0x12,0xff,0x54,0x0d]
  33  vmla.f16 q0, q1, q2
  [all …]
D | thumb-neon-crypto.s
  19  sha1c.32 q0, q1, q2
  20  @ CHECK: sha1c.32 q0, q1, q2 @ encoding: [0x02,0xef,0x44,0x0c]
  21  sha1m.32 q0, q1, q2
  22  @ CHECK: sha1m.32 q0, q1, q2 @ encoding: [0x22,0xef,0x44,0x0c]
  23  sha1p.32 q0, q1, q2
  24  @ CHECK: sha1p.32 q0, q1, q2 @ encoding: [0x12,0xef,0x44,0x0c]
  25  sha1su0.32 q0, q1, q2
  26  @ CHECK: sha1su0.32 q0, q1, q2 @ encoding: [0x32,0xef,0x44,0x0c]
  27  sha256h.32 q0, q1, q2
  28  @ CHECK: sha256h.32 q0, q1, q2 @ encoding: [0x02,0xff,0x44,0x0c]
  [all …]
D | vmov-vmvn-illegal-cases.s
  7   @ CHECK: vmov.i32 q2, #0xffffffab
  9   @ CHECK: vmov.i16 q2, #0xffab
  11  @ CHECK: vmov.i16 q2, #0xffab
  16  @ CHECK: vmvn.i32 q2, #0xffffffab
  18  @ CHECK: vmvn.i16 q2, #0xffab
  20  @ CHECK: vmvn.i16 q2, #0xffab
  23  vmov.i32 q2, #0xffffffab
  24  vmov.i16 q2, #0xffab
  25  vmov.i16 q2, #0xffab
  28  vmvn.i32 q2, #0xffffffab
  [all …]
D | neon-crypto.s
  27  sha1c.32 q0, q1, q2
  28  sha1m.32 q0, q1, q2
  29  sha1p.32 q0, q1, q2
  30  sha1su0.32 q0, q1, q2
  31  sha256h.32 q0, q1, q2
  32  sha256h2.32 q0, q1, q2
  33  sha256su1.32 q0, q1, q2
  34  @ CHECK: sha1c.32 q0, q1, q2 @ encoding: [0x44,0x0c,0x02,0xf2]
  35  @ CHECK: sha1m.32 q0, q1, q2 @ encoding: [0x44,0x0c,0x22,0xf2]
  36  @ CHECK: sha1p.32 q0, q1, q2 @ encoding: [0x44,0x0c,0x12,0xf2]
  [all …]
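The sha256h/sha256h2 instructions exercised by these tests are reachable from C through the ACLE crypto intrinsics. A hedged sketch, assuming an ARMv8 compiler with the crypto extension enabled (the helper name is ours, not from the tests):

    #include <arm_neon.h>

    /* One four-round SHA-256 step: sha256h updates the ABCD half of the
       state, sha256h2 updates EFGH; both consume the same wk = w + k input. */
    static void sha256_rounds4(uint32x4_t *abcd, uint32x4_t *efgh, uint32x4_t wk) {
      uint32x4_t abcd_in = *abcd;                  /* sha256h2 needs the old ABCD */
      *abcd = vsha256hq_u32(*abcd, *efgh, wk);     /* sha256h.32  q0, q1, q2 */
      *efgh = vsha256h2q_u32(*efgh, abcd_in, wk);  /* sha256h2.32 q0, q1, q2 */
    }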
D | neon-minmax-encoding.s
  19  vmax.s8 q1, q2, q3
  27  vmax.s8 q2, q3
  30  vmax.u8 q11, q2
  33  vmax.f32 q2, q1
  49  @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x46,0x26,0x04,0xf2]
  56  @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x46,0x46,0x04,0xf2]
  59  @ CHECK: vmax.u8 q11, q11, q2 @ encoding: [0xc4,0x66,0x46,0xf3]
  62  @ CHECK: vmax.f32 q2, q2, q1 @ encoding: [0x42,0x4f,0x04,0xf2]
  81  vmin.s8 q1, q2, q3
  89  vmin.s8 q2, q3
  [all …]
D | neont2-minmax-encoding.s
  21  vmax.s8 q1, q2, q3
  29  vmax.s8 q2, q3
  32  vmax.u8 q11, q2
  35  vmax.f32 q2, q1
  51  @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x04,0xef,0x46,0x26]
  58  @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x04,0xef,0x46,0x46]
  61  @ CHECK: vmax.u8 q11, q11, q2 @ encoding: [0x46,0xff,0xc4,0x66]
  64  @ CHECK: vmax.f32 q2, q2, q1 @ encoding: [0x04,0xef,0x42,0x4f]
  83  vmin.s8 q1, q2, q3
  91  vmin.s8 q2, q3
  [all …]
D | fullfp16-neon-neg.s
  7   vadd.f16 q0, q1, q2
  12  vsub.f16 q0, q1, q2
  17  vmul.f16 q0, q1, q2
  27  vmla.f16 q0, q1, q2
  37  vmls.f16 q0, q1, q2
  47  vfma.f16 q0, q1, q2
  52  vfms.f16 q0, q1, q2
  57  vceq.f16 q2, q3, q4
  62  vceq.f16 q2, q3, #0
  67  vcge.f16 q2, q3, q4
  [all …]
/external/eigen/test/

D | geo_quaternion.cpp
  72  Quaternionx q1, q2;   in quaternion() local
  73  q2.setIdentity();   in quaternion()
  74  VERIFY_IS_APPROX(Quaternionx(Quaternionx::Identity()).coeffs(), q2.coeffs());   in quaternion()
  76  VERIFY_IS_APPROX(q1.coeffs(), (q1*q2).coeffs());   in quaternion()
  81  ss << q2;   in quaternion()
  86  q1 *= q2;   in quaternion()
  89  q2 = AngleAxisx(a, v1.normalized());   in quaternion()
  92  Scalar refangle = abs(AngleAxisx(q1.inverse()*q2).angle());   in quaternion()
  96  if((q1.coeffs()-q2.coeffs()).norm() > Scalar(10)*largeEps)   in quaternion()
  98  VERIFY_IS_MUCH_SMALLER_THAN(abs(q1.angularDistance(q2) - refangle), Scalar(1));   in quaternion()
  [all …]
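The refangle check in this test leans on the identity that the rotation angle between two unit quaternions is twice the arccosine of the absolute value of their dot product. A standalone C sketch of that identity (hypothetical helper, not Eigen's API; quaternions stored as w,x,y,z):

    #include <math.h>

    /* Angle of the rotation taking unit quaternion a to unit quaternion b:
       2*acos(|<a,b>|), matching what q1.angularDistance(q2) measures. */
    static double quat_angular_distance(const double a[4], const double b[4]) {
      double dot = fabs(a[0] * b[0] + a[1] * b[1] + a[2] * b[2] + a[3] * b[3]);
      if (dot > 1.0) dot = 1.0; /* clamp: rounding can push |dot| past 1 */
      return 2.0 * acos(dot);
    }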
/external/llvm/test/CodeGen/ARM/

D | thumb-big-stack.ll
  145  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  147  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  149  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  151  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  153  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  155  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  157  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  159  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  161  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  163  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
  [all …]
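Each matched line is an empty inline-asm call whose constraint string clobbers the NEON q-registers, which forces the register allocator to spill every live vector value and so provokes the big stack frame the test is about. The equivalent gesture in C source would look roughly like the sketch below; whether "q0"-style clobber names are accepted depends on the toolchain ("d0"–"d31" is the safer spelling on some compilers):

    /* Force all NEON quad registers to be treated as clobbered, so any live
       vector values must be spilled to the stack across this point. */
    static inline void clobber_neon_q_regs(void) {
      __asm__ volatile("" ::: "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
                       "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15");
    }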
/external/freetype/src/base/

D | ftbbox.c
  268  FT_Pos q2,   in cubic_peak() argument
  286  FT_ABS( q2 ) |   in cubic_peak()
  297  q2 *= 1 << shift;   in cubic_peak()
  304  q2 >>= -shift;   in cubic_peak()
  311  while ( q2 > 0 || q3 > 0 )   in cubic_peak()
  314  if ( q1 + q2 > q3 + q4 ) /* first half */   in cubic_peak()
  317  q3 = q3 + q2;   in cubic_peak()
  318  q2 = q2 + q1;   in cubic_peak()
  320  q3 = q3 + q2;   in cubic_peak()
  323  q2 = q2 >> 1;   in cubic_peak()
  [all …]
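The fragments show cubic_peak halving a cubic Bézier in fixed point: the shifts rescale the control values for precision, and the loop keeps whichever half of the curve can still contain the peak. The underlying subdivision step, written out in floating point for clarity (a hypothetical helper, not FreeType's code):

    /* One de Casteljau split of a cubic with control values q[0..3] at t = 1/2;
       each half is again a cubic, so a peak search can recurse on either side. */
    static void cubic_split_half(const double q[4], double left[4], double right[4]) {
      double q01  = 0.5 * (q[0] + q[1]);
      double q12  = 0.5 * (q[1] + q[2]);
      double q23  = 0.5 * (q[2] + q[3]);
      double q012 = 0.5 * (q01 + q12);
      double q123 = 0.5 * (q12 + q23);
      double mid  = 0.5 * (q012 + q123); /* curve point at t = 1/2 */
      left[0]  = q[0]; left[1]  = q01;  left[2]  = q012; left[3]  = mid;
      right[0] = mid;  right[1] = q123; right[2] = q23;  right[3] = q[3];
    }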
/external/apache-commons-math/src/main/java/org/apache/commons/math/geometry/

D | Rotation.java
  110  private final double q2;   field in Rotation
  133  public Rotation(double q0, double q1, double q2, double q3,   in Rotation() argument
  138  double inv = 1.0 / FastMath.sqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3);   in Rotation()
  141  q2 *= inv;   in Rotation()
  147  this.q2 = q2;   in Rotation()
  185  q2 = coeff * axis.getY();   in Rotation()
  261  q2 = inv * (ort[2][0] - ort[0][2]);   in Rotation()
  270  q2 = inv * (ort[0][1] + ort[1][0]);   in Rotation()
  276  q2 = 0.5 * FastMath.sqrt(s + 1.0);   in Rotation()
  277  double inv = 0.25 / q2;   in Rotation()
  [all …]
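Lines 138 and 141 of the constructor normalize the quaternion so (q0,q1,q2,q3) has unit norm before the components are stored. The same step in plain C (a sketch of the math only, not the Commons Math API):

    #include <math.h>

    /* Scale a quaternion to unit norm, as in the matched constructor:
       inv = 1/sqrt(q0^2 + q1^2 + q2^2 + q3^2), then multiply each component. */
    static void quat_normalize(double q[4]) {
      double inv = 1.0 / sqrt(q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3]);
      for (int i = 0; i < 4; ++i) q[i] *= inv;
    }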
/external/libvpx/vpx_dsp/

D | loopfilter.c
  36   uint8_t q1, uint8_t q2, uint8_t q3) {   in filter_mask() argument
  42   mask |= (abs(q2 - q1) > limit) * -1;   in filter_mask()
  43   mask |= (abs(q3 - q2) > limit) * -1;   in filter_mask()
  50   uint8_t q2, uint8_t q3) {   in flat_mask4() argument
  55   mask |= (abs(q2 - q0) > thresh) * -1;   in flat_mask4()
  63   uint8_t q1, uint8_t q2, uint8_t q3,   in flat_mask5() argument
  65   int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3);   in flat_mask5()
  121  const uint8_t q0 = s[0 * pitch], q1 = s[1 * pitch], q2 = s[2 * pitch],   in vpx_lpf_horizontal_4_c() local
  124  filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3);   in vpx_lpf_horizontal_4_c()
  146  const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3];   in vpx_lpf_vertical_4_c() local
  [all …]
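The filter_mask fragments compare each neighboring pixel step against limit and the edge pair against blimit, accumulating an all-ones or all-zeros byte mask. Filling in the comparisons the search elides by symmetry with the matched lines gives this sketch (the unmatched lines are our reconstruction, not a verbatim quote):

    #include <stdint.h>
    #include <stdlib.h>

    /* Reconstruction of filter_mask: returns ~0 when the edge may be
       filtered, 0 when any neighbor step exceeds the limits. */
    static int8_t filter_mask_sketch(uint8_t limit, uint8_t blimit, uint8_t p3,
                                     uint8_t p2, uint8_t p1, uint8_t p0,
                                     uint8_t q0, uint8_t q1, uint8_t q2,
                                     uint8_t q3) {
      int8_t mask = 0;
      mask |= (abs(p3 - p2) > limit) * -1;  /* assumed by symmetry */
      mask |= (abs(p2 - p1) > limit) * -1;  /* assumed by symmetry */
      mask |= (abs(p1 - p0) > limit) * -1;  /* assumed by symmetry */
      mask |= (abs(q1 - q0) > limit) * -1;  /* assumed by symmetry */
      mask |= (abs(q2 - q1) > limit) * -1;  /* matched line 42 */
      mask |= (abs(q3 - q2) > limit) * -1;  /* matched line 43 */
      mask |= (abs(p0 - q0) * 2 + abs(p1 - q1) / 2 > blimit) * -1; /* assumed */
      return ~mask;
    }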
/external/llvm/test/MC/Hexagon/

D | v60-vcmp.s
  5   #CHECK: 1c81f142 { q2 |= vcmp.eq(v17.b{{ *}},{{ *}}v1.b) }
  6   q2|=vcmp.eq(v17.b,v1.b)
  8   #CHECK: 1c84fb2a { q2 &= vcmp.gt(v27.uw{{ *}},{{ *}}v4.uw) }
  9   q2&=vcmp.gt(v27.uw,v4.uw)
  11  #CHECK: 1c8cf826 { q2 &= vcmp.gt(v24.uh{{ *}},{{ *}}v12.uh) }
  12  q2&=vcmp.gt(v24.uh,v12.uh)
  17  #CHECK: 1c9aed1a { q2 &= vcmp.gt(v13.w{{ *}},{{ *}}v26.w) }
  18  q2&=vcmp.gt(v13.w,v26.w)
  20  #CHECK: 1c8de516 { q2 &= vcmp.gt(v5.h{{ *}},{{ *}}v13.h) }
  21  q2&=vcmp.gt(v5.h,v13.h)
  [all …]
/external/libvpx/vp8/common/arm/neon/

D | dequant_idct_neon.c
  27  int16x8_t q1, q2, q3, q4, q5, q6;   in vp8_dequant_idct_add_neon() local
  58  q2 = vreinterpretq_s16_u16(   in vp8_dequant_idct_add_neon()
  61  d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2));   in vp8_dequant_idct_add_neon()
  62  d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2));   in vp8_dequant_idct_add_neon()
  64  q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2));   in vp8_dequant_idct_add_neon()
  66  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);   in vp8_dequant_idct_add_neon()
  67  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1);   in vp8_dequant_idct_add_neon()
  71  q4 = vqaddq_s16(q4, q2);   in vp8_dequant_idct_add_neon()
  89  q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]);   in vp8_dequant_idct_add_neon()
  91  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2);   in vp8_dequant_idct_add_neon()
  [all …]
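The vqdmulhq_n_s16 calls implement VP8's fixed-point multiplies by the sinpi8sqrt2 and cospi8sqrt2minus1 constants visible in the matched lines. VQDMULH itself is a saturating doubling multiply that returns the high half; a scalar model of one lane:

    #include <stdint.h>

    /* Scalar model of one lane of vqdmulhq_n_s16:
       result = saturate((2 * a * b) >> 16). */
    static int16_t vqdmulh_lane(int16_t a, int16_t b) {
      int64_t prod = 2 * (int64_t)a * (int64_t)b; /* doubling multiply */
      int32_t hi = (int32_t)(prod >> 16);         /* keep the high half */
      if (hi > INT16_MAX) hi = INT16_MAX;         /* only a == b == -32768 saturates */
      return (int16_t)hi;
    }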
/external/renderscript-intrinsics-replacement-toolkit/renderscript-toolkit/src/main/cpp/

D | Blend_neon.S
  63   vmov.i8 q2, #0
  71   vmov q2, q10
  89   vmull.u8 q2, d14, d4
  97   vrshrn.u16 d12, q2, #8
  106  vaddw.u8 q2, d12
  115  vrshrn.u16 d4, q2, #8
  122  vqadd.u8 q2, q10
  143  vrshrn.u16 d12, q2, #8
  168  vqadd.u8 q2, q10
  179  vmull.u8 q2, d6, d20
  [all …]
/external/libaom/aom_dsp/

D | loopfilter.c
  47   uint8_t q1, uint8_t q2, uint8_t q3) {   in filter_mask() argument
  53   mask |= (abs(q2 - q1) > limit) * -1;   in filter_mask()
  54   mask |= (abs(q3 - q2) > limit) * -1;   in filter_mask()
  61   uint8_t q0, uint8_t q1, uint8_t q2) {   in filter_mask3_chroma() argument
  66   mask |= (abs(q2 - q1) > limit) * -1;   in filter_mask3_chroma()
  73   uint8_t q2) {   in flat_mask3_chroma() argument
  78   mask |= (abs(q2 - q0) > thresh) * -1;   in flat_mask3_chroma()
  84   uint8_t q2, uint8_t q3) {   in flat_mask4() argument
  89   mask |= (abs(q2 - q0) > thresh) * -1;   in flat_mask4()
  207  const uint8_t q0 = *oq0, q1 = *oq1, q2 = *oq2;   in filter6() local
  [all …]
/external/capstone/suite/MC/ARM/

D | neont2-minmax-encoding.s.cs
  16  0x04,0xef,0x46,0x26 = vmax.s8 q1, q2, q3
  23  0x04,0xef,0x46,0x46 = vmax.s8 q2, q2, q3
  26  0x46,0xff,0xc4,0x66 = vmax.u8 q11, q11, q2
  29  0x04,0xef,0x42,0x4f = vmax.f32 q2, q2, q1
  44  0x04,0xef,0x56,0x26 = vmin.s8 q1, q2, q3
  51  0x04,0xef,0x56,0x46 = vmin.s8 q2, q2, q3
  54  0x46,0xff,0xd4,0x66 = vmin.u8 q11, q11, q2
  57  0x24,0xef,0x42,0x4f = vmin.f32 q2, q2, q1
D | neon-minmax-encoding.s.cs
  16  0x46,0x26,0x04,0xf2 = vmax.s8 q1, q2, q3
  23  0x46,0x46,0x04,0xf2 = vmax.s8 q2, q2, q3
  26  0xc4,0x66,0x46,0xf3 = vmax.u8 q11, q11, q2
  29  0x42,0x4f,0x04,0xf2 = vmax.f32 q2, q2, q1
  44  0x56,0x26,0x04,0xf2 = vmin.s8 q1, q2, q3
  51  0x56,0x46,0x04,0xf2 = vmin.s8 q2, q2, q3
  54  0xd4,0x66,0x46,0xf3 = vmin.u8 q11, q11, q2
  57  0x42,0x4f,0x24,0xf2 = vmin.f32 q2, q2, q1
D | thumb-neon-crypto.s.cs
  9   0x02,0xef,0x44,0x0c = sha1c.32 q0, q1, q2
  10  0x22,0xef,0x44,0x0c = sha1m.32 q0, q1, q2
  11  0x12,0xef,0x44,0x0c = sha1p.32 q0, q1, q2
  12  0x32,0xef,0x44,0x0c = sha1su0.32 q0, q1, q2
  13  0x02,0xff,0x44,0x0c = sha256h.32 q0, q1, q2
  14  0x12,0xff,0x44,0x0c = sha256h2.32 q0, q1, q2
  15  0x22,0xff,0x44,0x0c = sha256su1.32 q0, q1, q2
D | neon-crypto.s.cs
  9   0x44,0x0c,0x02,0xf2 = sha1c.32 q0, q1, q2
  10  0x44,0x0c,0x22,0xf2 = sha1m.32 q0, q1, q2
  11  0x44,0x0c,0x12,0xf2 = sha1p.32 q0, q1, q2
  12  0x44,0x0c,0x32,0xf2 = sha1su0.32 q0, q1, q2
  13  0x44,0x0c,0x02,0xf3 = sha256h.32 q0, q1, q2
  14  0x44,0x0c,0x12,0xf3 = sha256h2.32 q0, q1, q2
  15  0x44,0x0c,0x22,0xf3 = sha256su1.32 q0, q1, q2
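These .s.cs files pair raw byte encodings with their expected disassembly; Capstone's MC regression suite replays them against its ARM decoder. A minimal sketch of checking one such pair programmatically, using the ARM-mode vmax.s8 bytes from neon-minmax-encoding.s.cs above (error handling trimmed):

    #include <stdio.h>
    #include <capstone/capstone.h>

    int main(void) {
      const uint8_t code[] = { 0x46, 0x26, 0x04, 0xf2 }; /* vmax.s8 q1, q2, q3 */
      csh handle;
      cs_insn *insn;

      if (cs_open(CS_ARCH_ARM, CS_MODE_ARM, &handle) != CS_ERR_OK) return 1;
      size_t count = cs_disasm(handle, code, sizeof(code), 0x1000, 0, &insn);
      for (size_t i = 0; i < count; ++i)
        printf("%s %s\n", insn[i].mnemonic, insn[i].op_str); /* expect: vmax.s8 q1, q2, q3 */
      cs_free(insn, count);
      cs_close(&handle);
      return 0;
    }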
/external/libvpx/vpx_dsp/loongarch/

D | loopfilter_8_lsx.c
  19   __m128i p3, p2, p1, p0, q3, q2, q1, q0;   in vpx_lpf_horizontal_8_lsx() local
  33   DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);   in vpx_lpf_horizontal_8_lsx()
  40   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,   in vpx_lpf_horizontal_8_lsx()
  42   VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);   in vpx_lpf_horizontal_8_lsx()
  55   DUP4_ARG2(__lsx_vsllwil_hu_bu, q0, 0, q1, 0, q2, 0, q3, 0, q0_l, q1_l, q2_l,   in vpx_lpf_horizontal_8_lsx()
  66   q1_out = __lsx_vilvl_d(q2, q1_out);   in vpx_lpf_horizontal_8_lsx()
  89   __m128i p3, p2, p1, p0, q3, q2, q1, q0;   in vpx_lpf_horizontal_8_dual_lsx() local
  106  DUP2_ARG2(__lsx_vldx, dst, stride, dst, stride2, q1, q2);   in vpx_lpf_horizontal_8_dual_lsx()
  122  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,   in vpx_lpf_horizontal_8_dual_lsx()
  124  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);   in vpx_lpf_horizontal_8_dual_lsx()
  [all …]
/external/libvpx/vpx_dsp/mips/

D | loopfilter_8_msa.c
  20   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;   in vpx_lpf_horizontal_8_msa() local
  27   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);   in vpx_lpf_horizontal_8_msa()
  33   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,   in vpx_lpf_horizontal_8_msa()
  35   VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);   in vpx_lpf_horizontal_8_msa()
  48   q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r);   in vpx_lpf_horizontal_8_msa()
  63   q2_out = __msa_bmnz_v(q2, (v16u8)q2_filter8, flat);   in vpx_lpf_horizontal_8_msa()
  86   v16u8 p3, p2, p1, p0, q3, q2, q1, q0;   in vpx_lpf_horizontal_8_dual_msa() local
  96   LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3);   in vpx_lpf_horizontal_8_dual_msa()
  111  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev,   in vpx_lpf_horizontal_8_dual_msa()
  113  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat);   in vpx_lpf_horizontal_8_dual_msa()
  [all …]