
Searched refs:q3 (Results 1 – 25 of 219) sorted by relevance


/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
110 veor q4, q7, q3
111 veor.8 q4, q7, q3
112 veor.16 q4, q7, q3
113 veor.32 q4, q7, q3
114 veor.64 q4, q7, q3
116 veor.i8 q4, q7, q3
117 veor.i16 q4, q7, q3
118 veor.i32 q4, q7, q3
119 veor.i64 q4, q7, q3
121 veor.s8 q4, q7, q3
[all …]
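The veor forms above all encode the same operation: a 128-bit bitwise XOR of two q registers, with the element-size suffix accepted by the assembler but irrelevant to the encoding. A minimal C intrinsics equivalent, for illustration only (assumes an ARM target with NEON enabled):

    #include <arm_neon.h>

    /* XOR two 16-byte vectors; corresponds to "veor qD, qN, qM". */
    uint8x16_t xor_block(uint8x16_t a, uint8x16_t b) {
        return veorq_u8(a, b);
    }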
/external/llvm/test/CodeGen/ARM/
thumb-big-stack.ll
145 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
147 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
149 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
151 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
153 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
155 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
157 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
159 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
161 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
163 …tail call void asm sideeffect "", "~{q0}{q1}{q2}{q3}{q4}{q5}{q6}{q7}{q8}{q9}{q10}{q11}{q12}{q13}{q…
[all …]
/external/pdfium/third_party/freetype/src/base/
ftbbox.c
252 FT_Pos q3, in cubic_peak() argument
270 FT_ABS( q3 ) | in cubic_peak()
281 q3 <<= shift; in cubic_peak()
288 q3 >>= -shift; in cubic_peak()
294 while ( q2 > 0 || q3 > 0 ) in cubic_peak()
297 if ( q1 + q2 > q3 + q4 ) /* first half */ in cubic_peak()
299 q4 = q4 + q3; in cubic_peak()
300 q3 = q3 + q2; in cubic_peak()
302 q4 = q4 + q3; in cubic_peak()
303 q3 = q3 + q2; in cubic_peak()
[all …]
/external/freetype/src/base/
ftbbox.c
252 FT_Pos q3, in cubic_peak() argument
270 FT_ABS( q3 ) | in cubic_peak()
281 q3 <<= shift; in cubic_peak()
288 q3 >>= -shift; in cubic_peak()
294 while ( q2 > 0 || q3 > 0 ) in cubic_peak()
297 if ( q1 + q2 > q3 + q4 ) /* first half */ in cubic_peak()
299 q4 = q4 + q3; in cubic_peak()
300 q3 = q3 + q2; in cubic_peak()
302 q4 = q4 + q3; in cubic_peak()
303 q3 = q3 + q2; in cubic_peak()
[all …]
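Both copies of cubic_peak() above repeatedly halve a cubic, keeping whichever half can still contain the maximum. A rough C sketch of that subdivision idea (an illustration, not the FreeType code, which works on FT_Pos fixed-point values with the shift normalization shown above):

    /* Find the peak of a cubic Bezier with control values c0..c3 by
     * de Casteljau subdivision at t = 1/2.                              */
    static double cubic_peak_sketch(double c0, double c1, double c2, double c3,
                                    int depth, double best)
    {
        /* Endpoints lie on the curve, so they are always candidates.    */
        if (c0 > best) best = c0;
        if (c3 > best) best = c3;

        /* The control values bound the curve on this span: prune when
         * even the largest one cannot improve on the current best.      */
        double hi = c1 > c2 ? c1 : c2;
        if (hi <= best || depth == 0)
            return best;

        /* de Casteljau split. */
        double m01  = (c0 + c1) / 2,   m12  = (c1 + c2) / 2,  m23 = (c2 + c3) / 2;
        double m012 = (m01 + m12) / 2, m123 = (m12 + m23) / 2;
        double mid  = (m012 + m123) / 2;       /* curve value at t = 1/2 */

        best = cubic_peak_sketch(c0,  m01,  m012, mid, depth - 1, best);
        best = cubic_peak_sketch(mid, m123, m23,  c3,  depth - 1, best);
        return best;
    }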
/external/boringssl/linux-arm/crypto/aes/
aesv8-armx32.S
37 vld1.8 {q3},[r0]!
47 vtbl.8 d20,{q3},d4
48 vtbl.8 d21,{q3},d5
49 vext.8 q9,q0,q3,#12
50 vst1.32 {q3},[r2]!
54 veor q3,q3,q9
56 veor q3,q3,q9
59 veor q3,q3,q9
61 veor q3,q3,q10
66 vtbl.8 d20,{q3},d4
[all …]
/external/apache-commons-math/src/main/java/org/apache/commons/math/geometry/
Rotation.java
113 private final double q3; field in Rotation
133 public Rotation(double q0, double q1, double q2, double q3, in Rotation() argument
138 double inv = 1.0 / FastMath.sqrt(q0 * q0 + q1 * q1 + q2 * q2 + q3 * q3); in Rotation()
142 q3 *= inv; in Rotation()
148 this.q3 = q3; in Rotation()
186 q3 = coeff * axis.getZ(); in Rotation()
262 q3 = inv * (ort[0][1] - ort[1][0]); in Rotation()
271 q3 = inv * (ort[0][2] + ort[2][0]); in Rotation()
280 q3 = inv * (ort[2][1] + ort[1][2]); in Rotation()
284 q3 = 0.5 * FastMath.sqrt(s + 1.0); in Rotation()
[all …]
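The constructor excerpted above normalizes the quaternion so that q0² + q1² + q2² + q3² = 1. The same step in plain C for reference (the array layout is an assumption made for the sketch):

    #include <math.h>

    /* Scale the four quaternion components to unit norm. */
    void quat_normalize(double q[4]) {
        double inv = 1.0 / sqrt(q[0]*q[0] + q[1]*q[1] + q[2]*q[2] + q[3]*q[3]);
        q[0] *= inv;  q[1] *= inv;  q[2] *= inv;  q[3] *= inv;
    }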
/external/boringssl/linux-arm/crypto/modes/
ghashv8-armx32.S
14 vext.8 q3,q9,q9,#8
18 vshr.u64 q10,q3,#63
21 vshl.i64 q3,q3,#1
24 vorr q3,q3,q10 @ H<<<=1
25 veor q12,q3,q8 @ twisted H
68 vext.8 q3,q9,q9,#8
70 .byte 0x86,0x0e,0xa8,0xf2 @ pmull q0,q12,q3 @ H.lo·Xi.lo
71 veor q9,q9,q3 @ Karatsuba pre-processing
72 .byte 0x87,0x4e,0xa9,0xf2 @ pmull2 q2,q12,q3 @ H.hi·Xi.hi
130 vext.8 q3,q8,q8,#8 @ rotate I[0]
[all …]
/external/libvpx/libvpx/vp8/common/arm/neon/
dequant_idct_neon.c
24 int16x8_t q1, q2, q3, q4, q5, q6; in vp8_dequant_idct_add_neon() local
32 q3 = vld1q_s16(input); in vp8_dequant_idct_add_neon()
53 q1 = vreinterpretq_s16_u16(vmulq_u16(vreinterpretq_u16_s16(q3), in vp8_dequant_idct_add_neon()
63 q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); in vp8_dequant_idct_add_neon()
66 q3 = vshrq_n_s16(q3, 1); in vp8_dequant_idct_add_neon()
69 q3 = vqaddq_s16(q3, q2); in vp8_dequant_idct_add_neon()
72 d10 = vqsub_s16(vget_low_s16(q3), vget_high_s16(q4)); in vp8_dequant_idct_add_neon()
73 d11 = vqadd_s16(vget_high_s16(q3), vget_low_s16(q4)); in vp8_dequant_idct_add_neon()
90 q3 = vqdmulhq_n_s16(q2, sinpi8sqrt2); in vp8_dequant_idct_add_neon()
96 q3 = vshrq_n_s16(q3, 1); in vp8_dequant_idct_add_neon()
[all …]
idct_dequant_full_2x_neon.c
24 int16x8_t q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11; in idct_dequant_full_2x_neon() local
41 q3 = vld1q_s16(q); in idct_dequant_full_2x_neon()
70 q3 = vmulq_s16(q3, q1); in idct_dequant_full_2x_neon()
82 dLow0 = vget_low_s16(q3); in idct_dequant_full_2x_neon()
83 dHigh0 = vget_high_s16(q3); in idct_dequant_full_2x_neon()
86 q3 = vcombine_s16(dLow0, dLow1); in idct_dequant_full_2x_neon()
94 q10 = vqaddq_s16(q2, q3); in idct_dequant_full_2x_neon()
95 q11 = vqsubq_s16(q2, q3); in idct_dequant_full_2x_neon()
104 q3 = vqaddq_s16(q7, q4); in idct_dequant_full_2x_neon()
106 q4 = vqaddq_s16(q10, q3); in idct_dequant_full_2x_neon()
[all …]
vp8_loopfilter_neon.c
19 uint8x16_t q3, // p3 in vp8_loop_filter_neon() argument
37 q11u8 = vabdq_u8(q3, q4); in vp8_loop_filter_neon()
41 q3 = vabdq_u8(q9, q8); in vp8_loop_filter_neon()
46 q3 = vmaxq_u8(q3, q4); in vp8_loop_filter_neon()
54 q15u8 = vmaxq_u8(q15u8, q3); in vp8_loop_filter_neon()
133 uint8x16_t qblimit, qlimit, qthresh, q3, q4; in vp8_loop_filter_horizontal_edge_y_neon() local
141 q3 = vld1q_u8(src); in vp8_loop_filter_horizontal_edge_y_neon()
157 vp8_loop_filter_neon(qblimit, qlimit, qthresh, q3, q4, in vp8_loop_filter_horizontal_edge_y_neon()
179 uint8x16_t qblimit, qlimit, qthresh, q3, q4; in vp8_loop_filter_horizontal_edge_uv_neon() local
222 q3 = vcombine_u8(d6, d7); in vp8_loop_filter_horizontal_edge_uv_neon()
[all …]
/external/libvpx/libvpx/vpx_dsp/
loopfilter.c
40 uint8_t q2, uint8_t q3) { in filter_mask() argument
47 mask |= (abs(q3 - q2) > limit) * -1; in filter_mask()
56 uint8_t q2, uint8_t q3) { in flat_mask4() argument
63 mask |= (abs(q3 - q0) > thresh) * -1; in flat_mask4()
72 uint8_t q3, uint8_t q4) { in flat_mask5() argument
73 int8_t mask = ~flat_mask4(thresh, p3, p2, p1, p0, q0, q1, q2, q3); in flat_mask5()
129 const uint8_t q0 = s[0 * p], q1 = s[1 * p], q2 = s[2 * p], q3 = s[3 * p]; in vpx_lpf_horizontal_4_c() local
131 p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_c()
154 const uint8_t q0 = s[0], q1 = s[1], q2 = s[2], q3 = s[3]; in vpx_lpf_vertical_4_c() local
156 p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_c()
[all …]
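The `(abs(...) > limit) * -1` idiom above builds a byte mask: each comparison yields 0 or 1, and multiplying by -1 turns a 1 into all-ones. A hedged scalar sketch of that idiom (the signature is illustrative, not the libvpx prototype):

    #include <stdint.h>
    #include <stdlib.h>

    static int8_t filter_mask_sketch(uint8_t limit, uint8_t p1, uint8_t p0,
                                     uint8_t q0, uint8_t q1, uint8_t q2,
                                     uint8_t q3) {
        int8_t mask = 0;
        mask |= (abs(p1 - p0) > limit) * -1;   /* -1 == 0xFF when over limit */
        mask |= (abs(q1 - q0) > limit) * -1;
        mask |= (abs(q2 - q1) > limit) * -1;
        mask |= (abs(q3 - q2) > limit) * -1;
        /* Complement: all-ones only when every difference stays within the
         * limit, i.e. the pixel run is smooth enough to filter.            */
        return ~mask;
    }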
/external/libavc/common/arm/
ih264_padding_neon.s
185 vdup.u8 q3, r11
188 vst1.8 {q3}, [r4], r1 @ 16 bytes store
197 vdup.u8 q3, r11
200 vst1.8 {q3}, [r4], r1 @ 16 bytes store
217 vdup.u8 q3, r11
220 vst1.8 {q3}, [r4]! @ 16 bytes store
223 vst1.8 {q3}, [r4], r6 @ 16 bytes store
233 vdup.u8 q3, r11
236 vst1.8 {q3}, [r4]! @ 16 bytes store
237 vst1.8 {q3}, [r4], r6 @ 16 bytes store
[all …]
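The vdup.u8/vst1.8 pairs above replicate one byte across 16-byte stores, which is the usual way edge padding is vectorized. A scalar sketch of that idea (an assumption about the routine's purpose, not its actual interface):

    #include <stdint.h>
    #include <string.h>

    /* Replicate the left edge sample of a row across the pad region. */
    static void pad_row_left(uint8_t *row, int pad) {
        memset(row - pad, row[0], (size_t)pad);
    }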
ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
153 vaddl.u8 q3, d0, d5
159 vmla.u16 q3, q4, q11
163 vmls.u16 q3, q4, q12
170 vst1.32 {q3}, [r9], r6 @ store temp buffer 0
249 vaddl.s16 q3, d7, d17
255 vmlal.s16 q3, d31, d22
256 vmlsl.s16 q3, d29, d24
262 vqrshrun.s32 d19, q3, #10
279 vaddl.s16 q3, d9, d21
291 vmlal.s16 q3, d31, d22
[all …]
ih264_weighted_pred_a9q.s
142 vmovl.u8 q3, d6 @converting rows 3,4 to 16-bit
145 vmul.s16 q3, q3, d2[0] @weight mult. for rows 3,4
149 vrshl.s16 q3, q3, q0 @rounds off the weighted samples from rows 3,4
152 vaddw.s8 q3, q3, d3 @adding offset for rows 3,4
155 vqmovun.s16 d6, q3 @saturating rows 3,4 to unsigned 8-bit
173 vmovl.u8 q3, d6 @converting row 2 to 16-bit
178 vmul.s16 q3, q3, d2[0] @weight mult. for row 2
183 vrshl.s16 q3, q3, q0 @rounds off the weighted samples from row 2
187 vaddw.s8 q3, q3, d3 @adding offset for row 2
192 vqmovun.s16 d6, q3 @saturating row 2 to unsigned 8-bit
[all …]
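Following the comments above, each sample is widened to 16 bits, multiplied by the weight, rounded back down, offset, and saturated to 8 bits. A scalar sketch under those assumptions (names and signature are illustrative, not the ih264 interface):

    #include <stdint.h>

    static uint8_t weight_sample(uint8_t src, int16_t wt, int8_t ofst, int shift) {
        int32_t v = (int32_t)src * wt;                  /* weight multiply      */
        if (shift > 0)
            v = (v + (1 << (shift - 1))) >> shift;      /* rounding right shift */
        v += ofst;                                      /* add offset           */
        if (v < 0)   v = 0;                             /* saturate to u8       */
        if (v > 255) v = 255;
        return (uint8_t)v;
    }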
/external/libjpeg-turbo/simd/
jsimd_arm_neon.S
108 INT32 q1, q2, q3, q4, q5, q6, q7; \
130 q3 = ((INT32) row0 - (INT32) row4) << 13; \
134 q1 = q3 + q2; \
152 q3 = q3 - q2; \
168 tmp12 = q3; \
252 vmul.s16 q11, q11, q3
260 vmul.s16 q15, q15, q3
271 vsubl.s16 q3, ROW0L, ROW4L
280 vshl.s32 q3, q3, #13
284 vadd.s32 q1, q3, q2
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_4_msa.c
20 v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out; in vpx_lpf_horizontal_4_msa() local
25 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_msa()
31 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, in vpx_lpf_horizontal_4_msa()
50 v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_4_dual_msa() local
53 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_dual_msa()
67 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0, in vpx_lpf_horizontal_4_dual_msa()
80 v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_vertical_4_msa() local
85 LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_msa()
91 TRANSPOSE8x8_UB_UB(p3, p2, p1, p0, q0, q1, q2, q3, in vpx_lpf_vertical_4_msa()
92 p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_msa()
[all …]
loopfilter_masks_dspr2.h
31 uint32_t q2, uint32_t q3, in filter_hev_mask_dspr2() argument
95 [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh) in filter_hev_mask_dspr2()
138 uint32_t q2, uint32_t q3, in filter_hev_mask_flatmask4_dspr2() argument
243 [q2] "r" (q2), [q3] "r" (q3), [thresh] "r" (thresh), in filter_hev_mask_flatmask4_dspr2()
286 uint32_t q3, uint32_t q4, in flatmask5() argument
362 [q2] "r" (q2), [q3] "r" (q3), [q4] "r" (q4), in flatmask5()
loopfilter_8_msa.c
20 v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_8_msa() local
29 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_msa()
35 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, in vpx_lpf_horizontal_8_msa()
37 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); in vpx_lpf_horizontal_8_msa()
50 zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, in vpx_lpf_horizontal_8_msa()
93 v16u8 p3, p2, p1, p0, q3, q2, q1, q0; in vpx_lpf_horizontal_8_dual_msa() local
103 LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_dual_msa()
118 LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh, in vpx_lpf_horizontal_8_dual_msa()
120 VP9_FLAT4(p3, p2, p0, q0, q2, q3, flat); in vpx_lpf_horizontal_8_dual_msa()
127 zero, q2, zero, q3, p3_r, p2_r, p1_r, p0_r, q0_r, q1_r, in vpx_lpf_horizontal_8_dual_msa()
[all …]
/external/libhevc/common/arm/
ihevc_intra_pred_luma_horz.s
124 vdup.8 q3,d1[5] @duplicate the iii value.
133 vst1.8 {q3},[r2],r3
134 vst1.8 {q3},[r9],r3
140 vdup.8 q3,d1[1]
149 vst1.8 {q3},[r2],r3
150 vst1.8 {q3},[r9],r3
156 vdup.8 q3,d0[5]
165 vst1.8 {q3},[r2],r3
166 vst1.8 {q3},[r9],r3
172 vdup.8 q3,d0[1]
[all …]
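The vdup/vst1 sequence above broadcasts one reference sample per row, which is what horizontal intra prediction does: every pixel in a row takes the value of that row's left neighbour. An illustrative scalar sketch (not the ihevc routine):

    #include <stdint.h>

    static void intra_pred_horz_sketch(const uint8_t *left, uint8_t *dst,
                                       int stride, int size) {
        for (int row = 0; row < size; row++)
            for (int col = 0; col < size; col++)
                dst[row * stride + col] = left[row];   /* duplicate one sample */
    }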
ihevc_intra_pred_chroma_horz.s
126 vdup.16 q3,d1[1] @duplicate the iii value.
135 vst1.16 {q3},[r2],r3
136 vst1.16 {q3},[r9],r3
142 vdup.16 q3,d0[1]
151 vst1.16 {q3},[r2],r3
152 vst1.16 {q3},[r9],r3
158 vdup.16 q3,d11[1]
167 vst1.16 {q3},[r2],r3
168 vst1.16 {q3},[r9],r3
174 vdup.16 q3,d10[1]
[all …]
ihevc_deblk_chroma_horz.s
92 vsub.i16 q3,q0,q1
96 vshl.i16 q3,q3,#2
106 vadd.i16 q2,q3,q2
108 vsub.i16 q3,q2,q8
125 vrshr.s16 q3,q3,#3
133 vmin.s16 q8,q3,q2
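The shift, add, rounding-shift, and min/clip arithmetic above appears to implement the HEVC chroma deblocking delta. A scalar sketch of that formula, assuming 8-bit samples (helper names are illustrative):

    /*   delta = Clip3(-tc, tc, (((q0 - p0) << 2) + p1 - q1 + 4) >> 3)
     * then p0 += delta and q0 -= delta, each clipped back to 0..255.   */
    static int clip3(int lo, int hi, int v) { return v < lo ? lo : v > hi ? hi : v; }

    static void deblk_chroma_sketch(int *p0, int *q0, int p1, int q1, int tc) {
        int delta = clip3(-tc, tc, (((*q0 - *p0) << 2) + p1 - q1 + 4) >> 3);
        *p0 = clip3(0, 255, *p0 + delta);
        *q0 = clip3(0, 255, *q0 - delta);
    }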
/external/icu/icu4c/source/test/perf/howExpensiveIs/
sieve.cpp
104 double qs(double *times, int n, double *q1, double *q2, double *q3) { in qs() argument
107 *q3 = medianof(times,n,3); in qs()
108 return *q3-*q1; in qs()
112 double q1,q2,q3; in uprv_getMeanTime() local
117 double iqr = qs(times,n,&q1,&q2,&q3); in uprv_getMeanTime()
119 double rangeMax = (q3+(1.5*iqr)); in uprv_getMeanTime()
124 printf("iqr: %.9f, q1=%.9f, q2=%.9f, q3=%.9f, max=%.9f, n=%d\n", iqr,q1,q2,q3,(double)-1, n); in uprv_getMeanTime()
148 double iqr = qs(times,n,&q1,&q2,&q3); in uprv_getMeanTime()
150 rangeMax = (q3+(1.5*iqr)); in uprv_getMeanTime()
178 printf("min: %.9f, q1=%.9f, q2=%.9f, q3=%.9f, max=%.9f, n=%d\n", minTime,q1,q2,q3,maxTime, n); in uprv_getMeanTime()
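qs() above returns the interquartile range q3 - q1, and uprv_getMeanTime() discards samples above q3 + 1.5*IQR. The outlier bound in isolation (a sketch; it assumes medianof() has already produced the quartiles):

    static double iqr_upper_bound(double q1, double q3) {
        double iqr = q3 - q1;      /* what qs() returns                 */
        return q3 + 1.5 * iqr;     /* rangeMax in uprv_getMeanTime()    */
    }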
/external/boringssl/src/crypto/curve25519/asm/
x25519-asm-arm.S
80 vshr.u64 q3,q2,#7
110 vand q5,q5,q3
111 vand q7,q7,q3
112 vand q9,q9,q3
113 vand q11,q11,q3
114 vand q3,q13,q3
145 vadd.i64 q3,q3,q13
147 vadd.i64 q14,q3,q0
176 vsub.i64 q3,q3,q10
187 vsub.i64 q3,q6,q7
[all …]
/external/libvpx/libvpx/vpx_dsp/arm/
vpx_convolve_avg_neon_asm.asm
37 vld1.8 {q2-q3}, [r0], lr
44 vrhadd.u8 q3, q3, q11
46 vst1.8 {q2-q3}, [r2@128], r4
53 vld1.8 {q2-q3}, [r0], r1
63 vrhadd.u8 q3, q3, q11
65 vst1.8 {q2-q3}, [r2@128], r3
74 vld1.8 {q3}, [r6@128], r3
80 vrhadd.u8 q1, q1, q3
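vrhadd.u8 above is an unsigned rounding halving add, i.e. (a + b + 1) >> 1 per byte, so the block averages the new prediction into the destination. A scalar sketch (illustrative signature, not the libvpx prototype):

    #include <stdint.h>

    static void convolve_avg_sketch(const uint8_t *src, uint8_t *dst, int n) {
        for (int i = 0; i < n; i++)
            dst[i] = (uint8_t)((dst[i] + src[i] + 1) >> 1);
    }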
/external/boringssl/src/crypto/chacha/
chacha_vec_arm.S
157 vadd.i32 q3, q11, q0
210 veor q3, q3, q9
216 vrev32.16 q3, q3
218 vadd.i32 q8, q8, q3
247 veor q4, q9, q3
253 vshl.i32 q3, q4, #8
260 vsri.32 q3, q4, #24
268 vadd.i32 q4, q8, q3
308 vext.32 q3, q3, q3, #3
315 veor q3, q9, q3
[all …]
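The add/xor/rotate pattern above is the ChaCha quarter round: vrev32.16 performs the rotate by 16, and the vshl #8 / vsri #24 pair the rotate by 8. The reference quarter round in plain C:

    #include <stdint.h>

    #define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

    static void chacha_quarter_round(uint32_t *a, uint32_t *b,
                                     uint32_t *c, uint32_t *d) {
        *a += *b; *d ^= *a; *d = ROTL32(*d, 16);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 12);
        *a += *b; *d ^= *a; *d = ROTL32(*d, 8);
        *c += *d; *b ^= *c; *b = ROTL32(*b, 7);
    }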
