/external/valgrind/none/tests/arm/
D | neon128.c |
    357  TESTINSN_imm("vmov.i8 q2", q2, 0x7); in main()
    371  TESTINSN_imm("vmvn.i8 q2", q2, 0x7); in main()
    382  TESTINSN_imm("vorr.i16 q2", q2, 0x7); in main()
    412  TESTINSN_bin("vadd.i32 q0, q1, q2", q0, q1, i32, 24, q2, i32, 120); in main()
    413  TESTINSN_bin("vadd.i64 q0, q1, q2", q0, q1, i32, 140, q2, i32, 120); in main()
    414  TESTINSN_bin("vadd.i32 q0, q1, q2", q0, q1, i32, 140, q2, i32, 120); in main()
    415  TESTINSN_bin("vadd.i16 q0, q1, q2", q0, q1, i32, 140, q2, i32, 120); in main()
    416  TESTINSN_bin("vadd.i8 q0, q1, q2", q0, q1, i32, 140, q2, i32, 120); in main()
    417  TESTINSN_bin("vadd.i8 q0, q1, q2", q0, q1, i32, (1 << 31) + 1, q2, i32, (1 << 31) + 2); in main()
    418  TESTINSN_bin("vadd.i16 q0, q1, q2", q0, q1, i32, (1 << 31) + 1, q2, i32, (1 << 31) + 2); in main()
    [all …]
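A minimal standalone sketch of what the vadd.i32 case on line 412 checks, using NEON intrinsics rather than the suite's inline-asm TESTINSN_bin macro; the fill-every-lane pattern is inferred from the expected output in the next entry, and the harness details are an assumption:

    #include <stdio.h>
    #include <stdint.h>
    #include <arm_neon.h>

    int main(void) {
        uint32_t out[4];
        uint32x4_t qm = vdupq_n_u32(24);    /* every i32 lane of Qm = 24  */
        uint32x4_t qn = vdupq_n_u32(120);   /* every i32 lane of Qn = 120 */
        vst1q_u32(out, vaddq_u32(qm, qn));  /* vadd.i32 q0, q1, q2        */
        for (int i = 0; i < 4; i++)
            printf("0x%08x ", out[i]);      /* prints 0x00000090 four times */
        printf("\n");
        return 0;
    }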
|
D | neon128.stdout.exp |
    95   vadd.i32 q0, q1, q2 :: Qd 0x00000090 0x00000090 0x00000090 0x00000090 Qm (i32)0x00000018 Qn (i32)…
    96   vadd.i64 q0, q1, q2 :: Qd 0x00000104 0x00000104 0x00000104 0x00000104 Qm (i32)0x0000008c Qn (i32)…
    97   vadd.i32 q0, q1, q2 :: Qd 0x00000104 0x00000104 0x00000104 0x00000104 Qm (i32)0x0000008c Qn (i32)…
    98   vadd.i16 q0, q1, q2 :: Qd 0x00000104 0x00000104 0x00000104 0x00000104 Qm (i32)0x0000008c Qn (i32)…
    99   vadd.i8 q0, q1, q2 :: Qd 0x00000004 0x00000004 0x00000004 0x00000004 Qm (i32)0x0000008c Qn (i32)0…
    100  vadd.i8 q0, q1, q2 :: Qd 0x00000003 0x00000003 0x00000003 0x00000003 Qm (i32)0x80000001 Qn (i32)0…
    101  vadd.i16 q0, q1, q2 :: Qd 0x00000003 0x00000003 0x00000003 0x00000003 Qm (i32)0x80000001 Qn (i32)0…
    102  vadd.i32 q0, q1, q2 :: Qd 0x00000003 0x00000003 0x00000003 0x00000003 Qm (i32)0x80000001 Qn (i32)0…
    103  vadd.i64 q0, q1, q2 :: Qd 0x00000004 0x00000003 0x00000004 0x00000003 Qm (i32)0x80000001 Qn (i32)0…
    107  vsub.i32 q0, q1, q2 :: Qd 0xffffffa0 0xffffffa0 0xffffffa0 0xffffffa0 Qm (i32)0x00000018 Qn (i32)…
    [all …]
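The expected lanes are plain modular arithmetic: 0x18 + 0x78 = 0x90 (line 95); 0x8c + 0x78 = 0x104 fits intact in .i16/.i32/.i64 lanes (lines 96-98) but drops the carry out of each byte in .i8 lanes, leaving 0x04 (line 99); and 0x80000001 + 0x80000002 = 0x1_00000003, so the .i8/.i16/.i32 variants discard the carry and show 0x00000003 (lines 100-102), while .i64 carries it into the upper word, printed as the 0x00000004 0x00000003 word pairs on line 103.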
|
/external/llvm/test/MC/ARM/ |
D | vorr-vbic-illegal-cases.s |
    5   vorr.i32 q2, #0xffffffff
    7   vorr.i32 q2, #0xabababab
    8   vorr.i16 q2, #0xabab
    9   vorr.i16 q2, #0xabab
    14  @ CHECK: vorr.i32 q2, #0xffffffff
    18  @ CHECK: vorr.i32 q2, #0xabababab
    20  @ CHECK: vorr.i16 q2, #0xabab
    22  @ CHECK: vorr.i16 q2, #0xabab
    25  vbic.i32 q2, #0xffffffff
    27  vbic.i32 q2, #0xabababab
    [all …]
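These forms are rejected because the NEON modified-immediate encoding behind vorr/vbic carries only a single non-zero byte, optionally shifted within the element. A quick encodability check, written as a sketch from the ARM ARM's modified-immediate table rather than from the LLVM sources:

    #include <stdbool.h>
    #include <stdint.h>

    /* vorr.i32/vbic.i32 immediates: one byte in any of the four byte positions */
    static bool neon_logical_imm32_ok(uint32_t v) {
        for (int sh = 0; sh < 32; sh += 8)
            if ((v & ~(0xffu << sh)) == 0)
                return true;
        return false;
    }

    /* vorr.i16/vbic.i16 immediates: one byte in either halfword position */
    static bool neon_logical_imm16_ok(uint16_t v) {
        return (v & 0xff00u) == 0 || (v & 0x00ffu) == 0;
    }

    /* neon_logical_imm32_ok(0xffffffff) and neon_logical_imm32_ok(0xabababab)
       are both false, matching the diagnostics the test expects. */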
|
D | vmov-vmvn-byte-replicate.s |
    6   @ CHECK: vmov.i8 q2, #0xff @ encoding: [0x5f,0x4e,0x87,0xf3]
    8   @ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
    9   @ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
    10  @ CHECK: vmov.i8 q2, #0xab @ encoding: [0x5b,0x4e,0x82,0xf3]
    13  @ CHECK: vmov.i8 q2, #0x0 @ encoding: [0x50,0x4e,0x80,0xf2]
    15  @ CHECK: vmov.i8 q2, #0x54 @ encoding: [0x54,0x4e,0x85,0xf2]
    17  @ CHECK: vmov.i8 q2, #0x54 @ encoding: [0x54,0x4e,0x85,0xf2]
    20  vmov.i32 q2, #0xffffffff
    22  vmov.i32 q2, #0xabababab
    23  vmov.i16 q2, #0xabab
    [all …]
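Unlike the illegal cases above, these constants fail the .i32/.i16 immediate encoding but consist of a single byte replicated across the element, so the assembler legalizes them to the vmov.i8/vmvn.i8 forms shown in the CHECK lines. Detecting the replication takes one multiply; a sketch:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_byte_replicated32(uint32_t v) {
        /* true for e.g. 0xabababab and 0xffffffff: every byte equals the low byte */
        return v == 0x01010101u * (v & 0xffu);
    }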
|
D | fullfp16-neon.s |
    5   vadd.f16 q0, q1, q2
    7   @ ARM: vadd.f16 q0, q1, q2 @ encoding: [0x44,0x0d,0x12,0xf2]
    9   @ THUMB: vadd.f16 q0, q1, q2 @ encoding: [0x12,0xef,0x44,0x0d]
    12  vsub.f16 q0, q1, q2
    14  @ ARM: vsub.f16 q0, q1, q2 @ encoding: [0x44,0x0d,0x32,0xf2]
    16  @ THUMB: vsub.f16 q0, q1, q2 @ encoding: [0x32,0xef,0x44,0x0d]
    19  vmul.f16 q0, q1, q2
    21  @ ARM: vmul.f16 q0, q1, q2 @ encoding: [0x54,0x0d,0x12,0xf3]
    23  @ THUMB: vmul.f16 q0, q1, q2 @ encoding: [0x12,0xff,0x54,0x0d]
    33  vmla.f16 q0, q1, q2
    [all …]
|
D | thumb-neon-crypto.s |
    19  sha1c.32 q0, q1, q2
    20  @ CHECK: sha1c.32 q0, q1, q2 @ encoding: [0x02,0xef,0x44,0x0c]
    21  sha1m.32 q0, q1, q2
    22  @ CHECK: sha1m.32 q0, q1, q2 @ encoding: [0x22,0xef,0x44,0x0c]
    23  sha1p.32 q0, q1, q2
    24  @ CHECK: sha1p.32 q0, q1, q2 @ encoding: [0x12,0xef,0x44,0x0c]
    25  sha1su0.32 q0, q1, q2
    26  @ CHECK: sha1su0.32 q0, q1, q2 @ encoding: [0x32,0xef,0x44,0x0c]
    27  sha256h.32 q0, q1, q2
    28  @ CHECK: sha256h.32 q0, q1, q2 @ encoding: [0x02,0xff,0x44,0x0c]
    [all …]
|
D | vmov-vmvn-illegal-cases.s |
    7   @ CHECK: vmov.i32 q2, #0xffffffab
    9   @ CHECK: vmov.i16 q2, #0xffab
    11  @ CHECK: vmov.i16 q2, #0xffab
    16  @ CHECK: vmvn.i32 q2, #0xffffffab
    18  @ CHECK: vmvn.i16 q2, #0xffab
    20  @ CHECK: vmvn.i16 q2, #0xffab
    23  vmov.i32 q2, #0xffffffab
    24  vmov.i16 q2, #0xffab
    25  vmov.i16 q2, #0xffab
    28  vmvn.i32 q2, #0xffffffab
    [all …]
|
D | neon-crypto.s |
    27  sha1c.32 q0, q1, q2
    28  sha1m.32 q0, q1, q2
    29  sha1p.32 q0, q1, q2
    30  sha1su0.32 q0, q1, q2
    31  sha256h.32 q0, q1, q2
    32  sha256h2.32 q0, q1, q2
    33  sha256su1.32 q0, q1, q2
    34  @ CHECK: sha1c.32 q0, q1, q2 @ encoding: [0x44,0x0c,0x02,0xf2]
    35  @ CHECK: sha1m.32 q0, q1, q2 @ encoding: [0x44,0x0c,0x22,0xf2]
    36  @ CHECK: sha1p.32 q0, q1, q2 @ encoding: [0x44,0x0c,0x12,0xf2]
    [all …]
|
D | neont2-minmax-encoding.s |
    21  vmax.s8 q1, q2, q3
    29  vmax.s8 q2, q3
    32  vmax.u8 q11, q2
    35  vmax.f32 q2, q1
    51  @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x04,0xef,0x46,0x26]
    58  @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x04,0xef,0x46,0x46]
    61  @ CHECK: vmax.u8 q11, q11, q2 @ encoding: [0x46,0xff,0xc4,0x66]
    64  @ CHECK: vmax.f32 q2, q2, q1 @ encoding: [0x04,0xef,0x42,0x4f]
    83  vmin.s8 q1, q2, q3
    91  vmin.s8 q2, q3
    [all …]
|
D | neon-minmax-encoding.s |
    19  vmax.s8 q1, q2, q3
    27  vmax.s8 q2, q3
    30  vmax.u8 q11, q2
    33  vmax.f32 q2, q1
    49  @ CHECK: vmax.s8 q1, q2, q3 @ encoding: [0x46,0x26,0x04,0xf2]
    56  @ CHECK: vmax.s8 q2, q2, q3 @ encoding: [0x46,0x46,0x04,0xf2]
    59  @ CHECK: vmax.u8 q11, q11, q2 @ encoding: [0xc4,0x66,0x46,0xf3]
    62  @ CHECK: vmax.f32 q2, q2, q1 @ encoding: [0x42,0x4f,0x04,0xf2]
    81  vmin.s8 q1, q2, q3
    89  vmin.s8 q2, q3
    [all …]
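In both this file and its Thumb-2 twin above, the short forms exercise the two-operand alias: the assembler accepts vmax.s8 q2, q3 as shorthand for vmax.s8 q2, q2, q3, repeating the destination as the first source, and the CHECK lines confirm that both spellings assemble to the same bytes.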
|
D | fullfp16-neon-neg.s |
    7   vadd.f16 q0, q1, q2
    12  vsub.f16 q0, q1, q2
    17  vmul.f16 q0, q1, q2
    27  vmla.f16 q0, q1, q2
    37  vmls.f16 q0, q1, q2
    47  vfma.f16 q0, q1, q2
    52  vfms.f16 q0, q1, q2
    57  vceq.f16 q2, q3, q4
    62  vceq.f16 q2, q3, #0
    67  vcge.f16 q2, q3, q4
    [all …]
|
/external/eigen/test/ |
D | geo_quaternion.cpp |
    71  Quaternionx q1, q2; in quaternion() local
    72  q2.setIdentity(); in quaternion()
    73  VERIFY_IS_APPROX(Quaternionx(Quaternionx::Identity()).coeffs(), q2.coeffs()); in quaternion()
    75  VERIFY_IS_APPROX(q1.coeffs(), (q1*q2).coeffs()); in quaternion()
    78  q1 *= q2; in quaternion()
    81  q2 = AngleAxisx(a, v1.normalized()); in quaternion()
    84  Scalar refangle = abs(AngleAxisx(q1.inverse()*q2).angle()); in quaternion()
    88  if((q1.coeffs()-q2.coeffs()).norm() > 10*largeEps) in quaternion()
    90  VERIFY_IS_MUCH_SMALLER_THAN(abs(q1.angularDistance(q2) - refangle), Scalar(1)); in quaternion()
    95  VERIFY_IS_APPROX(q1 * q2 * v2, in quaternion()
    [all …]
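The product the test exercises (q1*q2, later compared against refangle via angularDistance) is the Hamilton product. A minimal scalar sketch for reference, with a w,x,y,z field layout assumed purely for illustration (Eigen itself stores coefficients as x,y,z,w):

    typedef struct { double w, x, y, z; } quat;

    /* Hamilton product r = a * b */
    static quat qmul(quat a, quat b) {
        quat r;
        r.w = a.w*b.w - a.x*b.x - a.y*b.y - a.z*b.z;
        r.x = a.w*b.x + a.x*b.w + a.y*b.z - a.z*b.y;
        r.y = a.w*b.y - a.x*b.z + a.y*b.w + a.z*b.x;
        r.z = a.w*b.z + a.x*b.y - a.y*b.x + a.z*b.w;
        return r;
    }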
|
/external/llvm/test/CodeGen/ARM/ |
D | thumb-big-stack.ll |
    145  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    147  …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
    (the same truncated full-q-register clobber line repeats at lines 149, 151, 153, 155, 157, 159, 161, and 163)
    [all …]
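The repeated empty asm statements matter only for their clobber lists: killing every q register forces spills and reloads, inflating the frame until offsets exceed what Thumb-2 load/store immediates can reach, which is the scenario this CodeGen test targets. A C-level analogue of one such statement might look like the following (the q-register clobber spelling is an assumption; clang accepts it on ARM targets):

    /* empty body; the clobbers alone force the compiler to spill around this point */
    static void kill_neon_regs(void) {
        __asm__ volatile("" ::: "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7");
    }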
|
/external/v8/src/base/ |
D | division-by-constant.cc |
    28  T q2 = min / ad; // Init. q2 = 2**p/|d|. in SignedDivisionByConstant() local
    29  T r2 = min - q2 * ad; // Init. r2 = rem(2**p, |d|). in SignedDivisionByConstant()
    39  q2 = 2 * q2; // Update q2 = 2**p/|d|. in SignedDivisionByConstant()
    42  q2 = q2 + 1; in SignedDivisionByConstant()
    47  T mul = q2 + 1; in SignedDivisionByConstant()
    66  T q2 = max / d; // Init. q2 = (2**p - 1)/d. in UnsignedDivisionByConstant() local
    67  T r2 = max - q2 * d; // Init. r2 = rem(2**p - 1, d). in UnsignedDivisionByConstant()
    79  if (q2 >= max) a = true; in UnsignedDivisionByConstant()
    80  q2 = 2 * q2 + 1; in UnsignedDivisionByConstant()
    83  if (q2 >= min) a = true; in UnsignedDivisionByConstant()
    [all …]
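Both routines follow the magic-number derivation from Hacker's Delight (chapter 10): q2 tracks the candidate quotient 2**p/|d| while p grows, and mul = q2 + 1 becomes the final multiplier. A self-contained 32-bit signed version, transcribed from the book rather than from the v8 sources, for orientation:

    #include <stdint.h>
    #include <stdlib.h>

    struct mag { int32_t multiplier; int shift; };

    /* signed division by constant d (|d| >= 2), word size 32; Hacker's Delight Fig. 10-1 */
    static struct mag signed_magic(int32_t d) {
        const uint32_t two31 = 0x80000000u;
        uint32_t ad = (uint32_t)abs(d);
        uint32_t t = two31 + ((uint32_t)d >> 31);
        uint32_t anc = t - 1 - t % ad;                    /* |nc|: largest safe dividend */
        int p = 31;
        uint32_t q1 = two31 / anc, r1 = two31 - q1 * anc;
        uint32_t q2 = two31 / ad,  r2 = two31 - q2 * ad;  /* q2 = 2**p/|d| */
        uint32_t delta;
        do {
            p++;
            q1 *= 2; r1 *= 2;
            if (r1 >= anc) { q1++; r1 -= anc; }
            q2 *= 2; r2 *= 2;                             /* update q2 = 2**p/|d| */
            if (r2 >= ad)  { q2++; r2 -= ad; }
            delta = ad - r2;
        } while (q1 < delta || (q1 == delta && r1 == 0));
        /* wraps to the signed multiplier, e.g. 0x92492493 with shift 2 for d = 7 */
        struct mag m = { (int32_t)(q2 + 1), p - 32 };
        if (d < 0) m.multiplier = -m.multiplier;
        return m;
    }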
|
/external/freetype/src/base/ |
D | ftbbox.c |
    251  FT_Pos q2, in cubic_peak() argument
    269  FT_ABS( q2 ) | in cubic_peak()
    280  q2 <<= shift; in cubic_peak()
    287  q2 >>= -shift; in cubic_peak()
    294  while ( q2 > 0 || q3 > 0 ) in cubic_peak()
    297  if ( q1 + q2 > q3 + q4 ) /* first half */ in cubic_peak()
    300  q3 = q3 + q2; in cubic_peak()
    301  q2 = q2 + q1; in cubic_peak()
    303  q3 = q3 + q2; in cubic_peak()
    306  q2 = q2 / 2; in cubic_peak()
    [all …]
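cubic_peak() bisects the Bezier arc, keeping whichever half the q1 + q2 > q3 + q4 test selects; the in-place updates are a fixed-point form of de Casteljau subdivision with the halving deferred. A floating-point sketch of one left-half split over a single coordinate, for comparison (names are illustrative, not FreeType's):

    /* replace control points q[0..3] with those of the left half of the cubic */
    static void cubic_split_left(double q[4]) {
        double m12  = (q[0] + q[1]) / 2;   /* midpoints of the control polygon */
        double m23  = (q[1] + q[2]) / 2;
        double m34  = (q[2] + q[3]) / 2;
        double m123 = (m12 + m23) / 2;
        double m234 = (m23 + m34) / 2;
        q[1] = m12;                        /* q[0] stays: the left endpoint */
        q[2] = m123;
        q[3] = (m123 + m234) / 2;          /* curve point at t = 1/2 */
    }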
|
/external/apache-commons-math/src/main/java/org/apache/commons/math/geometry/ |
D | Rotation.java |
    110  private final double q2; field in Rotation
    133  public Rotation(double q0, double q1, double q2, double q3, in Rotation() argument
    138  double inv = 1.0 / FastMath.sqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3); in Rotation()
    141  q2 *= inv; in Rotation()
    147  this.q2 = q2; in Rotation()
    185  q2 = coeff * axis.getY(); in Rotation()
    261  q2 = inv * (ort[2][0] - ort[0][2]); in Rotation()
    270  q2 = inv * (ort[0][1] + ort[1][0]); in Rotation()
    276  q2 = 0.5 * FastMath.sqrt(s + 1.0); in Rotation()
    277  double inv = 0.25 / q2; in Rotation()
    [all …]
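The constructor path on lines 133-147 normalizes the quaternion by dividing all four components by its norm; the later hits are the largest-diagonal branches that extract components from an orthogonal rotation matrix. The normalization step as a standalone sketch:

    #include <math.h>

    /* divide q0..q3 by sqrt(q0^2 + q1^2 + q2^2 + q3^2), as in the
       normalizing Rotation(q0, q1, q2, q3, true) constructor */
    static void quat_normalize(double q[4]) {
        double inv = 1.0 / sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3]);
        for (int i = 0; i < 4; i++)
            q[i] *= inv;
    }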
|
/external/libvpx/libvpx/vpx_dsp/ |
D | loopfilter.c |
    36   uint8_t q1, uint8_t q2, uint8_t q3) { in filter_mask() argument
    42   mask |= (abs(q2 - q1) > limit) * -1; in filter_mask()
    43   mask |= (abs(q3 - q2) > limit) * -1; in filter_mask()
    50   uint8_t q2, uint8_t q3) { in flat_mask4() argument
    55   mask |= (abs(q2 - q0) > thresh) * -1; in flat_mask4()
    63   uint8_t q1, uint8_t q2, uint8_t q3, in flat_mask5() argument
    65   int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3); in flat_mask5()
    121  const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; in vpx_lpf_horizontal_4_c() local
    123  filter_mask(*limit, *blimit, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_c()
    145  const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; in vpx_lpf_vertical_4_c() local
    [all …]
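The (condition) * -1 idiom widens a 0/1 comparison into a 0x00/0xff byte mask, so OR-ing the per-tap comparisons accumulates "some tap exceeded the limit" into one int8_t. A scalar illustration (a hypothetical helper, not vpx code):

    #include <stdint.h>
    #include <stdlib.h>

    /* 0x00 if |a - b| <= limit, 0xff otherwise */
    static int8_t tap_mask(uint8_t a, uint8_t b, uint8_t limit) {
        return (int8_t)((abs(a - b) > limit) * -1);
    }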
|
/external/llvm/test/MC/Hexagon/ |
D | v60-vcmp.s |
    5   #CHECK: 1c81f142 { q2 |= vcmp.eq(v17.b{{ *}},{{ *}}v1.b) }
    6   q2|=vcmp.eq(v17.b,v1.b)
    8   #CHECK: 1c84fb2a { q2 &= vcmp.gt(v27.uw{{ *}},{{ *}}v4.uw) }
    9   q2&=vcmp.gt(v27.uw,v4.uw)
    11  #CHECK: 1c8cf826 { q2 &= vcmp.gt(v24.uh{{ *}},{{ *}}v12.uh) }
    12  q2&=vcmp.gt(v24.uh,v12.uh)
    17  #CHECK: 1c9aed1a { q2 &= vcmp.gt(v13.w{{ *}},{{ *}}v26.w) }
    18  q2&=vcmp.gt(v13.w,v26.w)
    20  #CHECK: 1c8de516 { q2 &= vcmp.gt(v5.h{{ *}},{{ *}}v13.h) }
    21  q2&=vcmp.gt(v5.h,v13.h)
    [all …]
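Here q2 is an HVX predicate register, not a NEON register: the |= and &= spellings accumulate each compare into the existing predicate (OR-ing or AND-ing the new result into q2) rather than overwriting it, which is why the listed encodings differ only in their low opcode bits.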
|
/external/libvpx/libvpx/vp8/common/arm/neon/ |
D | dequant_idct_neon.c |
    27  int16x8_t q1, q2, q3, q4, q5, q6; in vp8_dequant_idct_add_neon() local
    58  q2 = vreinterpretq_s16_u16( in vp8_dequant_idct_add_neon()
    61  d12 = vqadd_s16(vget_low_s16(q1), vget_low_s16(q2)); in vp8_dequant_idct_add_neon()
    62  d13 = vqsub_s16(vget_low_s16(q1), vget_low_s16(q2)); in vp8_dequant_idct_add_neon()
    64  q2 = vcombine_s16(vget_high_s16(q1), vget_high_s16(q2)); in vp8_dequant_idct_add_neon()
    66  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); in vp8_dequant_idct_add_neon()
    67  q4 = vqdmulhq_n_s16(q2, cospi8sqrt2minus1); in vp8_dequant_idct_add_neon()
    71  q4 = vqaddq_s16(q4, q2); in vp8_dequant_idct_add_neon()
    89  q2 = vcombine_s16(d2tmp2.val[1], d2tmp3.val[1]); in vp8_dequant_idct_add_neon()
    91  q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); in vp8_dequant_idct_add_neon()
    [all …]
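vqdmulhq_n_s16 is the IDCT's fixed-point multiply: a saturating doubling multiply that returns the high half, i.e. saturate((2*x*c) >> 16), which with VP8's sinpi8sqrt2 and cospi8sqrt2minus1 constants approximates multiplication by the irrational DCT factors. A scalar model of one lane (a sketch, not vp8 code):

    #include <stdint.h>

    /* what vqdmulhq_n_s16 computes per lane: saturate((2 * x * c) >> 16) */
    static int16_t sqdmulh_s16(int16_t x, int16_t c) {
        int32_t p = ((int32_t)x * (int32_t)c) >> 15;
        if (p > INT16_MAX) p = INT16_MAX;   /* only reachable for x = c = -32768 */
        return (int16_t)p;
    }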
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | loopfilter_8_msa.c |
    20   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_8_msa() local
    27   LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_msa()
    33   LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in vpx_lpf_horizontal_8_msa()
    35   VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); in vpx_lpf_horizontal_8_msa()
    48   q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, q2_r, q3_r); in vpx_lpf_horizontal_8_msa()
    63   q2_out = __msa_bmnz_v(q2, (v16u8)q2_filter8, flat); in vpx_lpf_horizontal_8_msa()
    86   v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_8_dual_msa() local
    96   LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_dual_msa()
    111  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, hev, in vpx_lpf_horizontal_8_dual_msa()
    113  VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); in vpx_lpf_horizontal_8_dual_msa()
    [all …]
|
/external/capstone/suite/MC/ARM/ |
D | neon-minmax-encoding.s.cs |
    16  0x46,0x26,0x04,0xf2 = vmax.s8 q1, q2, q3
    23  0x46,0x46,0x04,0xf2 = vmax.s8 q2, q2, q3
    26  0xc4,0x66,0x46,0xf3 = vmax.u8 q11, q11, q2
    29  0x42,0x4f,0x04,0xf2 = vmax.f32 q2, q2, q1
    44  0x56,0x26,0x04,0xf2 = vmin.s8 q1, q2, q3
    51  0x56,0x46,0x04,0xf2 = vmin.s8 q2, q2, q3
    54  0xd4,0x66,0x46,0xf3 = vmin.u8 q11, q11, q2
    57  0x42,0x4f,0x24,0xf2 = vmin.f32 q2, q2, q1
|
D | neont2-minmax-encoding.s.cs |
    16  0x04,0xef,0x46,0x26 = vmax.s8 q1, q2, q3
    23  0x04,0xef,0x46,0x46 = vmax.s8 q2, q2, q3
    26  0x46,0xff,0xc4,0x66 = vmax.u8 q11, q11, q2
    29  0x04,0xef,0x42,0x4f = vmax.f32 q2, q2, q1
    44  0x04,0xef,0x56,0x26 = vmin.s8 q1, q2, q3
    51  0x04,0xef,0x56,0x46 = vmin.s8 q2, q2, q3
    54  0x46,0xff,0xd4,0x66 = vmin.u8 q11, q11, q2
    57  0x24,0xef,0x42,0x4f = vmin.f32 q2, q2, q1
|
D | neon-crypto.s.cs |
    9   0x44,0x0c,0x02,0xf2 = sha1c.32 q0, q1, q2
    10  0x44,0x0c,0x22,0xf2 = sha1m.32 q0, q1, q2
    11  0x44,0x0c,0x12,0xf2 = sha1p.32 q0, q1, q2
    12  0x44,0x0c,0x32,0xf2 = sha1su0.32 q0, q1, q2
    13  0x44,0x0c,0x02,0xf3 = sha256h.32 q0, q1, q2
    14  0x44,0x0c,0x12,0xf3 = sha256h2.32 q0, q1, q2
    15  0x44,0x0c,0x22,0xf3 = sha256su1.32 q0, q1, q2
|
D | thumb-neon-crypto.s.cs |
    9   0x02,0xef,0x44,0x0c = sha1c.32 q0, q1, q2
    10  0x22,0xef,0x44,0x0c = sha1m.32 q0, q1, q2
    11  0x12,0xef,0x44,0x0c = sha1p.32 q0, q1, q2
    12  0x32,0xef,0x44,0x0c = sha1su0.32 q0, q1, q2
    13  0x02,0xff,0x44,0x0c = sha256h.32 q0, q1, q2
    14  0x12,0xff,0x44,0x0c = sha256h2.32 q0, q1, q2
    15  0x22,0xff,0x44,0x0c = sha256su1.32 q0, q1, q2
|
/external/boringssl/ios-arm/crypto/fipsmodule/ |
D | aesv8-armx32.S |
    42   vld1.32 {q1,q2},[r3]!
    108  vsub.i8 q2,q2,q10 @ adjust the mask
    237  vld1.8 {q2},[r0]
    242  .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
    243  .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
    246  .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
    247  .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
    251  .byte 0x00,0x43,0xb0,0xf3 @ aese q2,q0
    252  .byte 0x84,0x43,0xb0,0xf3 @ aesmc q2,q2
    254  .byte 0x02,0x43,0xb0,0xf3 @ aese q2,q1
    [all …]
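The .byte sequences hand-encode aese/aesmc so the file assembles even when the assembler lacks the crypto extension; decoded, each pair is one AES round step. The same step via the ARMv8 crypto intrinsics (a sketch; requires a crypto-capable target, e.g. -march=armv8-a+crypto):

    #include <arm_neon.h>

    /* one AES encryption round step: AddRoundKey + SubBytes + ShiftRows (aese),
       then MixColumns (aesmc), which is what each ".byte" pair above encodes */
    static uint8x16_t aes_round_step(uint8x16_t state, uint8x16_t roundkey) {
        return vaesmcq_u8(vaeseq_u8(state, roundkey));
    }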
|