
Searched refs:d7 (Results 1 – 25 of 280) sorted by relevance


/external/llvm/test/MC/ARM/
neon-bitwise-encoding.s
169 vand d4, d7, d3
170 vand.8 d4, d7, d3
171 vand.16 d4, d7, d3
172 vand.32 d4, d7, d3
173 vand.64 d4, d7, d3
175 vand.i8 d4, d7, d3
176 vand.i16 d4, d7, d3
177 vand.i32 d4, d7, d3
178 vand.i64 d4, d7, d3
180 vand.s8 d4, d7, d3
[all …]
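The vand forms above all express the same 64-bit bitwise AND of two D registers; the test appears to exercise the assembler's acceptance of the optional data-type suffixes. As a rough illustration of the operation itself (not of the encoding check), a minimal C intrinsics sketch:

#include <arm_neon.h>

/* Sketch only: what vand d4, d7, d3 computes; the .8/.16/.i32/.s8
 * style suffixes in the test do not change the operation. */
uint8x8_t bitwise_and_d(uint8x8_t d7, uint8x8_t d3)
{
    return vand_u8(d7, d3);   /* result would live in d4 */
}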
neon-vld-encoding.s
13 vld1.32 {d5, d6, d7}, [r3]
14 vld1.64 {d6, d7, d8}, [r3:64]
16 vld1.16 {d4, d5, d6, d7}, [r3:64]
17 vld1.32 {d5, d6, d7, d8}, [r3]
18 vld1.64 {d6, d7, d8, d9}, [r3:64]
40 vld1.32 {d5, d6, d7}, [r3]!
41 vld1.64 {d6, d7, d8}, [r3:64]!
45 vld1.32 {d5, d6, d7}, [r3], r6
46 vld1.64 {d6, d7, d8}, [r3:64], r6
49 vld1.16 {d4, d5, d6, d7}, [r3:64]!
[all …]
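The vld1 variants above load one to four consecutive D registers, optionally with an alignment hint and post-increment or register writeback. For orientation, a hedged intrinsics sketch of the two-register case (a {dN, dN+1} pair is one Q register); the :64/:128 alignment hints have no direct intrinsic spelling, and names here are illustrative:

#include <arm_neon.h>

/* Sketch: one vld1q_u8 fills a Q register, i.e. a {dN, dN+1} pair,
 * from 16 contiguous bytes; post-increment addressing ([r3]!) is
 * expressed by advancing the pointer in C. */
uint8x16_t load_d_pair(const uint8_t **src)
{
    uint8x16_t v = vld1q_u8(*src);   /* e.g. {d6, d7} */
    *src += 16;                      /* [r3]! style post-increment */
    return v;
}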
neon-mul-accum-encoding.s
41 vqdmlal.s16 q11, d11, d7[0]
42 vqdmlal.s16 q11, d11, d7[1]
43 vqdmlal.s16 q11, d11, d7[2]
44 vqdmlal.s16 q11, d11, d7[3]
48 @ CHECK: vqdmlal.s16 q11, d11, d7[0] @ encoding: [0x47,0x63,0xdb,0xf2]
49 @ CHECK: vqdmlal.s16 q11, d11, d7[1] @ encoding: [0x4f,0x63,0xdb,0xf2]
50 @ CHECK: vqdmlal.s16 q11, d11, d7[2] @ encoding: [0x67,0x63,0xdb,0xf2]
51 @ CHECK: vqdmlal.s16 q11, d11, d7[3] @ encoding: [0x6f,0x63,0xdb,0xf2]
neont2-mul-accum-encoding.s
45 vqdmlal.s16 q11, d11, d7[0]
46 vqdmlal.s16 q11, d11, d7[1]
47 vqdmlal.s16 q11, d11, d7[2]
48 vqdmlal.s16 q11, d11, d7[3]
52 @ CHECK: vqdmlal.s16 q11, d11, d7[0] @ encoding: [0xdb,0xef,0x47,0x63]
53 @ CHECK: vqdmlal.s16 q11, d11, d7[1] @ encoding: [0xdb,0xef,0x4f,0x63]
54 @ CHECK: vqdmlal.s16 q11, d11, d7[2] @ encoding: [0xdb,0xef,0x67,0x63]
55 @ CHECK: vqdmlal.s16 q11, d11, d7[3] @ encoding: [0xdb,0xef,0x6f,0x63]
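Both the ARM and Thumb2 listings above exercise vqdmlal against a scalar lane of d7. A minimal intrinsics sketch of the operation, with the lane index shown as 0 and names illustrative:

#include <arm_neon.h>

/* Sketch: vqdmlal.s16 q11, d11, d7[n] is a saturating doubling
 * multiply-accumulate long against one scalar lane of d7. */
int32x4_t qdmlal_by_lane(int32x4_t q11, int16x4_t d11, int16x4_t d7)
{
    return vqdmlal_lane_s16(q11, d11, d7, 0);   /* d7[0]; lane must be a constant */
}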
eh-directive-vsave.s
50 .vsave {d0, d1, d2, d3, d4, d5, d6, d7}
51 vpush {d0, d1, d2, d3, d4, d5, d6, d7}
52 vpop {d0, d1, d2, d3, d4, d5, d6, d7}
63 .vsave {d2, d3, d4, d5, d6, d7}
64 vpush {d2, d3, d4, d5, d6, d7}
65 vpop {d2, d3, d4, d5, d6, d7}
/external/libhevc/common/arm/
ihevc_inter_pred_chroma_vert_w16out.s
186 vdup.32 d7,d6[1]
187 vld1.32 {d7[1]},[r6],r2 @loads pu1_src_tmp
188 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
189 vdup.32 d7,d7[1]
190 vld1.32 {d7[1]},[r6],r2
192 vmlal.u8 q2,d7,d2
193 vdup.32 d7,d7[1]
194 vld1.32 {d7[1]},[r6]
196 vmlsl.u8 q2,d7,d3
233 vld1.8 {d7},[r6],r2 @load and increment
[all …]
ihevc_inter_pred_chroma_vert.s
186 vdup.32 d7,d6[1]
187 vld1.32 {d7[1]},[r6],r2 @loads pu1_src_tmp
188 vmull.u8 q2,d7,d1 @vmull_u8(vreinterpret_u8_u32(src_tmp2), coeffabs_1)
189 vdup.32 d7,d7[1]
190 vld1.32 {d7[1]},[r6],r2
192 vmlal.u8 q2,d7,d2
193 vdup.32 d7,d7[1]
194 vld1.32 {d7[1]},[r6]
196 vmlsl.u8 q2,d7,d3
233 vld1.8 {d7},[r6],r2 @load and increment
[all …]
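The two chroma vertical interpolation files above share one accumulation pattern: a widening multiply of one source row by a filter coefficient (vmull.u8), with the remaining taps folded in by widening multiply-accumulate or multiply-subtract. A hedged C intrinsics sketch of that pattern; which taps are added or subtracted is illustrative here, taken from the vmull/vmlal/vmlsl sequence shown rather than the full source:

#include <arm_neon.h>

/* Hedged sketch of a 4-tap vertical chroma filter accumulator. */
uint16x8_t chroma_vert_4tap(uint8x8_t s0, uint8x8_t s1, uint8x8_t s2, uint8x8_t s3,
                            uint8x8_t c0, uint8x8_t c1, uint8x8_t c2, uint8x8_t c3)
{
    uint16x8_t acc = vmull_u8(s1, c1);   /* vmull.u8 q2, d7, d1 */
    acc = vmlal_u8(acc, s2, c2);         /* vmlal.u8 q2, d7, d2 */
    acc = vmlsl_u8(acc, s0, c0);         /* outer tap, subtracted */
    acc = vmlsl_u8(acc, s3, c3);         /* vmlsl.u8 q2, d7, d3 */
    return acc;
}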
ihevc_intra_pred_luma_planar.s
152 …vmov d7, d5 @mov #1 to d7 to used for inc for row+1 and dec for nt-1-r…
200 vadd.s8 d5, d5, d7 @(1)
202 vsub.s8 d6, d6, d7 @(1)
215 vadd.s8 d5, d5, d7 @(2)
216 vsub.s8 d6, d6, d7 @(2)
232 vadd.s8 d5, d5, d7 @(3)
233 vsub.s8 d6, d6, d7 @(3)
249 vadd.s8 d5, d5, d7 @(4)
250 vsub.s8 d6, d6, d7 @(4)
265 vadd.s8 d5, d5, d7 @(5)
[all …]
ihevc_padding.s
146 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
147 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
148 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
149 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
150 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
265 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
266 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
267 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
268 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
269 vst1.8 {d6,d7},[r7]! @128/8 = 16 bytes store
[all …]
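The padding routine above simply streams a replicated 16-byte value held in {d6, d7} with post-incremented stores. A small sketch of the same idea in C intrinsics; function and parameter names are illustrative:

#include <arm_neon.h>

/* Sketch: replicate a fill byte and store it 16 bytes at a time,
 * mirroring the repeated vst1.8 {d6, d7}, [r7]! above. */
void pad_row(uint8_t *dst, uint8_t fill, int n16)
{
    uint8x16_t v = vdupq_n_u8(fill);
    for (int i = 0; i < n16; i++) {
        vst1q_u8(dst, v);
        dst += 16;
    }
}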
ihevc_itrans_recon_32x32.s
122 @d5[0]= 50 d7[0]=18
123 @d5[1]= 46 d7[1]=13
124 @d5[2]= 43 d7[2]=9
125 @d5[3]= 38 d7[3]=4
172 vld1.16 {d4,d5,d6,d7},[r14]!
254 vmlsl.s16 q15,d14,d7[1]
259 vmlsl.s16 q14,d15,d7[1]
268 vmlal.s16 q8,d13,d7[2]
269 vmlal.s16 q9,d12,d7[0]
287 vmlsl.s16 q13,d9,d7[3] @// y1 * cos3 - y3 * sin1(part of b1)
[all …]
ihevc_itrans_recon_16x16.s
225 vld1.16 d7,[r9],r10
239 @d7=r3
247 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
248 vmlal.s16 q13,d7,d2[1] @// y1 * cos3 - y3 * sin1(part of b1)
249 vmlal.s16 q14,d7,d3[3] @// y1 * sin3 - y3 * cos1(part of b2)
250 vmlsl.s16 q15,d7,d2[3] @// y1 * sin1 - y3 * sin3(part of b3)
308 vld1.16 d7,[r9],r10
322 vmlal.s16 q12,d7,d2[3] @// y1 * cos1 + y3 * cos3(part of b0)
323 vmlsl.s16 q13,d7,d0[1] @// y1 * cos3 - y3 * sin1(part of b1)
324 vmlal.s16 q14,d7,d2[1] @// y1 * sin3 - y3 * cos1(part of b2)
[all …]
ihevc_inter_pred_filters_luma_vert.s
167 vld1.u8 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
172 vmlsl.u8 q4,d7,d29 @mul_res1 = vmlsl_u8(mul_res1, src_tmp4, coeffabs_7)@
192 vmlal.u8 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
208 vmlsl.u8 q6,d7,d27
222 vmlal.u8 q7,d7,d26
229 vld1.u8 {d7},[r3],r2 @src_tmp4 = vld1_u8(pu1_src_tmp)@
260 vmlsl.u8 q4,d7,d29 @mul_res1 = vmlsl_u8(mul_res1, src_tmp4, coeffabs_7)@
284 vmlal.u8 q5,d7,d28 @mul_res2 = vmlal_u8(mul_res2, src_tmp4, coeffabs_6)@
307 vmlsl.u8 q6,d7,d27
329 vmlal.u8 q7,d7,d26
[all …]
ihevc_itrans_recon_8x8.s
191 vld1.16 d7,[r9]!
200 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
202 vmlsl.s16 q13,d7,d1[3] @// y1 * cos3 - y3 * sin1(part of b1)
204 vmlsl.s16 q14,d7,d0[1] @// y1 * sin3 - y3 * cos1(part of b2)
206 vmlsl.s16 q15,d7,d1[1] @// y1 * sin1 - y3 * sin3(part of b3)
273 vqrshrn.s32 d7,q13,#shift_stage1_idct @// r3 = (a3 + b3 + rnd) >> 7(shift_stage1_idct)
290 vld1.16 d7,[r9]!
307 vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
308 vmlsl.s16 q13,d7,d1[3] @// y1 * cos3 - y3 * sin1(part of b1)
309 vmlsl.s16 q14,d7,d0[1] @// y1 * sin3 - y3 * cos1(part of b2)
[all …]
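The 8x8, 16x16 and 32x32 inverse-transform files above build their butterfly terms the same way: a widening multiply-accumulate of a coefficient row (here d7) against one lane of the cosine table in d0/d1, followed by a rounding narrow by the stage-1 shift. A hedged sketch of one such accumulate-and-narrow step; the lane index and SHIFT_STAGE1 are illustrative stand-ins:

#include <arm_neon.h>

/* Hedged sketch of one accumulate-and-narrow step of the IDCT. */
#define SHIFT_STAGE1 7

int16x4_t idct_acc_step(int32x4_t acc, int16x4_t coeff_row /* d7 */,
                        int16x4_t cos_tbl /* d0 */)
{
    acc = vmlal_lane_s16(acc, coeff_row, cos_tbl, 3);   /* vmlal.s16 q12, d7, d0[3] */
    return vqrshrn_n_s32(acc, SHIFT_STAGE1);            /* vqrshrn.s32 dX, q12, #shift */
}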
ihevc_intra_pred_chroma_planar.s
152 …vmov d7, d5 @mov #1 to d7 to used for inc for row+1 and dec for nt-1-r…
193 vadd.s8 d18, d5, d7 @row++ [(row+1)++]c
197 vsub.s8 d19, d6, d7 @[nt-1-row]--
215 vadd.s8 d5, d18, d7 @row++ [(row+1)++]
217 vsub.s8 d6, d19, d7 @[nt-1-row]--
237 vadd.s8 d18, d5, d7 @row++ [(row+1)++]c
239 vsub.s8 d19, d6, d7 @[nt-1-row]--
262 vadd.s8 d5, d18, d7 @row++ [(row+1)++]
264 vsub.s8 d6, d19, d7 @[nt-1-row]--
308 …vmov d7, d5 @mov #1 to d7 to used for inc for row+1 and dec for nt-1-r…
[all …]
ihevc_intra_pred_luma_mode_3_to_9.s
203 vsub.s8 d7, d28, d6 @32-fract
210 vmull.u8 q12, d12, d7 @mul (row 0)
220 vmull.u8 q11, d16, d7 @mul (row 1)
231 vmull.u8 q10, d14, d7 @mul (row 2)
242 vmull.u8 q9, d10, d7 @mul (row 3)
253 vmull.u8 q12, d12, d7 @mul (row 4)
264 vmull.u8 q11, d16, d7 @mul (row 5)
275 vmull.u8 q10, d14, d7 @mul (row 6)
279 vmull.u8 q9, d10, d7 @mul (row 7)
330 vmull.u8 q10, d14, d7 @mul (row 6)
[all …]
ihevc_intra_pred_filters_luma_mode_11_to_17.s
313 vsub.s8 d7, d28, d6 @32-fract
320 vmull.u8 q12, d12, d7 @mul (row 0)
330 vmull.u8 q11, d16, d7 @mul (row 1)
341 vmull.u8 q10, d14, d7 @mul (row 2)
352 vmull.u8 q9, d10, d7 @mul (row 3)
363 vmull.u8 q12, d12, d7 @mul (row 4)
374 vmull.u8 q11, d16, d7 @mul (row 5)
385 vmull.u8 q10, d14, d7 @mul (row 6)
389 vmull.u8 q9, d10, d7 @mul (row 7)
441 vmull.u8 q10, d14, d7 @mul (row 6)
[all …]
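In the two angular intra-prediction files above, d7 holds the (32 - fract) weight, and each output row blends two reference rows as ref0*(32 - fract) + ref1*fract before a shift by 5. A hedged intrinsics sketch of that blend; names are illustrative:

#include <arm_neon.h>

/* Hedged sketch of the angular-prediction blend. */
uint8x8_t angular_blend(uint8x8_t ref0, uint8x8_t ref1, uint8_t fract)
{
    uint8x8_t w1 = vdup_n_u8(fract);
    uint8x8_t w0 = vdup_n_u8((uint8_t)(32 - fract));    /* vsub.s8 d7, d28, d6 */
    uint16x8_t acc = vmull_u8(ref0, w0);                /* vmull.u8 q12, d12, d7 */
    acc = vmlal_u8(acc, ref1, w1);
    return vrshrn_n_u16(acc, 5);                        /* round and narrow by 5 */
}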
/external/libvpx/libvpx/vpx_dsp/arm/
loopfilter_4_neon.asm
57 vld1.u8 {d7}, [r2@64], r1 ; q0
70 vst1.u8 {d7}, [r3@64], r1 ; store oq1
117 vld1.u8 {d7}, [r2], r1
123 vtrn.32 d3, d7
130 vtrn.16 d7, d17
135 vtrn.8 d7, d16
143 vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0], r1
144 vst4.8 {d4[1], d5[1], d6[1], d7[1]}, [r0], r1
145 vst4.8 {d4[2], d5[2], d6[2], d7[2]}, [r0], r1
146 vst4.8 {d4[3], d5[3], d6[3], d7[3]}, [r0], r1
[all …]
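After the vtrn-based transpose above, each vst4.8 single-lane store writes one byte from each of d4..d7, i.e. four adjacent pixels of one image row, so the transposed block is written back column by column. A minimal intrinsics sketch of one such store; illustrative only:

#include <arm_neon.h>

/* Sketch: vst4.8 {d4[0], d5[0], d6[0], d7[0]}, [r0] writes lane 0 of
 * four D registers as four adjacent bytes of one row. */
void store_filtered_column(uint8_t *dst, uint8x8x4_t cols /* {d4..d7} */)
{
    vst4_lane_u8(dst, cols, 0);   /* lane must be a constant */
}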
/external/valgrind/none/tests/arm/
neon64.c
646 TESTINSN_imm("vmov.i16 d7", d7, 0x700); in main()
661 TESTINSN_imm("vmvn.i16 d7", d7, 0x700); in main()
749 TESTINSN_bin("vorr d7, d3, d0", d7, d3, i8, 0x24, d0, i16, 0xff); in main()
756 TESTINSN_bin("vorn d7, d3, d0", d7, d3, i8, 0x24, d0, i16, 0xff); in main()
767 TESTINSN_bin("veor d7, d3, d0", d7, d3, i8, 0x24, d0, i16, 0xff); in main()
778 TESTINSN_bin("vbsl d7, d3, d0", d7, d3, i8, 0x24, d0, i16, 0xff); in main()
789 TESTINSN_bin("vbit d7, d3, d0", d7, d3, i8, 0x24, d0, i16, 0xff); in main()
800 TESTINSN_bin("vbif d7, d3, d0", d7, d3, i8, 0x24, d0, i16, 0xff); in main()
898 TESTINSN_bin("vrhadd.s8 d5, d7, d5", d5, d7, i32, (1 << 31) + 1, d5, i32, (1 << 31) + 2); in main()
901 TESTINSN_bin("vrhadd.s8 d5, d7, d5", d5, d7, i32, (1 << 31) + 1, d5, i32, (1 << 31) + 3); in main()
[all …]
/external/libpng/arm/
filter_neon.S
69 vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
73 vadd.u8 d3, d2, d7
93 vext.8 d7, d23, d23, #1
98 vadd.u8 d3, d2, d7
124 vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
133 vadd.u8 d3, d3, d7
159 vext.8 d7, d23, d23, #1
168 vadd.u8 d3, d3, d7
197 vld4.32 {d4[],d5[],d6[],d7[]}, [r1,:128]
207 vadd.u8 d3, d3, d7
[all …]
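The libpng defilter code above is built from wrapping byte adds (vadd.u8) over previously reconstructed pixels, plus byte rotations (vext.8) to line up neighbours. As a hedged illustration of the wrapping-add building block only, here is the simplest PNG filter (Up) written with the same intrinsic; this is not a transcription of filter_neon.S, and the function name is illustrative:

#include <arm_neon.h>
#include <stddef.h>
#include <stdint.h>

/* Sketch: PNG Up defilter, out[i] = cur[i] + prior[i] modulo 256. */
void png_up_row(uint8_t *row, const uint8_t *prev, size_t n)
{
    size_t i = 0;
    for (; i + 8 <= n; i += 8) {
        uint8x8_t cur = vld1_u8(row + i);
        uint8x8_t up  = vld1_u8(prev + i);
        vst1_u8(row + i, vadd_u8(cur, up));   /* wrapping vadd.u8 */
    }
    for (; i < n; i++)                        /* scalar tail */
        row[i] = (uint8_t)(row[i] + prev[i]);
}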
/external/kernel-headers/original/uapi/linux/
uuid.h
35 #define UUID_LE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ argument
40 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
42 #define UUID_BE(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) \ argument
47 (d0), (d1), (d2), (d3), (d4), (d5), (d6), (d7) }})
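In the UUID_LE/UUID_BE macros above, the trailing d0..d7 parameters are the raw last eight bytes of the UUID and are stored verbatim; only the a, b, c fields are laid out differently between the little-endian and big-endian variants. A hypothetical usage sketch, assuming the uuid_le typedef from the same header; the include path and GUID value are illustrative:

#include <linux/uuid.h>   /* illustrative include path */

/* Hypothetical usage: build a GUID from its fields; d0..d7 land in
 * the buffer unchanged. */
static inline uuid_le make_example_guid(void)
{
    return UUID_LE(0x12345678, 0x9abc, 0xdef0,
                   0x01, 0x23, 0x45, 0x67, 0x89, 0xab, 0xcd, 0xef);
}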
/external/libavc/common/arm/
ih264_deblk_chroma_a9.s
96 vld2.8 {d6, d7}, [r0], r1 @D6 = p1u , D7 = p1v
102 vaddl.u8 q5, d7, d1 @Q4,Q5 = q0 + p1
114 vmlal.u8 q14, d7, d31 @Q14,Q7 = (X2(p1U) + p0U + q1U)
179 vld4.16 {d1[0], d3[0], d5[0], d7[0]}, [r0], r1
180 vld4.16 {d1[1], d3[1], d5[1], d7[1]}, [r0], r1
181 vld4.16 {d1[2], d3[2], d5[2], d7[2]}, [r0], r1
182 vld4.16 {d1[3], d3[3], d5[3], d7[3]}, [r0], r1
192 vaddl.u8 q8, d3, d7 @(p0 + q1)
202 vmlal.u8 q10, d7, d31 @2*q1 + (p1 + q0)
218 vst4.16 {d1[0], d3[0], d5[0], d7[0]}, [r12], r1
[all …]
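The vld2.8 {d6, d7} load above de-interleaves a chroma row so that d6 receives the U samples and d7 the V samples. The intrinsic equivalent, as a small sketch:

#include <arm_neon.h>

/* Sketch: de-interleave alternating U/V bytes into two D registers. */
uint8x8x2_t load_uv_row(const uint8_t *src)
{
    return vld2_u8(src);   /* .val[0] = U (d6), .val[1] = V (d7) */
}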
/external/libmpeg2/common/arm/
impeg2_idct.s
154 vld1.8 d7, [r2], r1
181 vaddw.u8 q11, q15, d7
185 vqmovun.s16 d7, q11
189 vst1.8 d7, [r3], r6
446 vld1.16 d7, [r9]!
455 vmlal.s16 q12, d7, d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
457 vmlsl.s16 q13, d7, d1[3] @// y1 * cos3 - y3 * sin1(part of b1)
459 vmlsl.s16 q14, d7, d0[1] @// y1 * sin3 - y3 * cos1(part of b2)
461 vmlsl.s16 q15, d7, d1[1] @// y1 * sin1 - y3 * sin3(part of b3)
528 vqrshrn.s32 d7, q13, #idct_stg1_shift @// r3 = (a3 + b3 + rnd) >> 7(IDCT_STG1_SHIFT)
[all …]
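The reconstruction step above widens the 8-bit prediction row in d7, adds it to the signed 16-bit IDCT output (vaddw.u8), and saturates back to bytes (vqmovun.s16). A hedged intrinsics sketch of that step; names are illustrative:

#include <arm_neon.h>

/* Sketch: add the residual to the prediction and clamp to [0, 255]. */
uint8x8_t recon_row(int16x8_t residual, uint8x8_t pred /* d7 */)
{
    uint16x8_t sum = vaddw_u8(vreinterpretq_u16_s16(residual), pred);  /* vaddw.u8 q11, q15, d7 */
    return vqmovun_s16(vreinterpretq_s16_u16(sum));                    /* vqmovun.s16 d7, q11 */
}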
icv_sad_a9.s
85 vld1.8 d7, [r1], r3
86 vabal.u8 q0, d7, d6
94 vld1.8 d7, [r1], r3
95 vabal.u8 q0, d7, d6
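The SAD kernel above accumulates widened absolute differences of 8-byte rows with vabal.u8. A hedged C intrinsics sketch of the same loop shape, including a final pairwise reduction that the listing does not show; names and the reduction are illustrative:

#include <arm_neon.h>
#include <stdint.h>

/* Sketch: sum of absolute differences over an 8-wide block. */
uint32_t sad_8xh(const uint8_t *a, int a_stride,
                 const uint8_t *b, int b_stride, int h)
{
    uint16x8_t acc = vdupq_n_u16(0);
    for (int y = 0; y < h; y++) {
        uint8x8_t pa = vld1_u8(a);        /* source row (d6) */
        uint8x8_t pb = vld1_u8(b);        /* vld1.8 d7, [r1], r3 */
        acc = vabal_u8(acc, pb, pa);      /* vabal.u8 q0, d7, d6 */
        a += a_stride;
        b += b_stride;
    }
    uint32x4_t s32 = vpaddlq_u16(acc);
    uint64x2_t s64 = vpaddlq_u32(s32);
    return (uint32_t)(vgetq_lane_u64(s64, 0) + vgetq_lane_u64(s64, 1));
}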
/external/llvm/test/CodeGen/X86/
fp128-calling-conv.ll
9 …, fp128 %d1, fp128 %d2, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d…
17 …, fp128 %d1, fp128 %d2, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d…
25 …, fp128 %d1, fp128 %d2, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d…
27 ret fp128 %d7
33 …, fp128 %d1, fp128 %d2, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d…
41 …, fp128 %d1, fp128 %d2, fp128 %d3, fp128 %d4, fp128 %d5, fp128 %d6, fp128 %d7, fp128 %d8, fp128 %d…
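The IR above passes a long list of fp128 arguments and returns %d7, presumably to pin down how that argument is passed and returned under the x86-64 calling convention. A hypothetical C shape of such a test; the __float128 mapping and names are assumptions, not taken from the .ll file:

/* Hypothetical source shape: return the eighth of many fp128 args. */
__float128 eighth_arg(__float128 d0, __float128 d1, __float128 d2, __float128 d3,
                      __float128 d4, __float128 d5, __float128 d6, __float128 d7,
                      __float128 d8, __float128 d9)
{
    return d7;   /* ret fp128 %d7 */
}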
/external/libavc/encoder/arm/
ime_distortion_metrics_a9q.s
99 vld1.8 {d6, d7}, [r1], r3
103 vabdl.u8 q1, d7, d5
111 vld1.8 {d6, d7}, [r1], r3
115 vabal.u8 q1, d7, d5
181 vld1.8 {d6, d7}, [r1], r3
186 vabdl.u8 q1, d7, d5
194 vld1.8 {d6, d7}, [r1], r3
198 vabal.u8 q1, d7, d5
264 vld1.8 {d6, d7}, [r1], r3
270 vabdl.u8 q1, d7, d5
[all …]
