Home
last modified time | relevance | path

Searched refs:d4 (Results 1 – 25 of 133) sorted by relevance

123456

/external/llvm/test/MC/ARM/
Dneon-bitwise-encoding.s 124 vand d4, d7, d3
125 vand.8 d4, d7, d3
126 vand.16 d4, d7, d3
127 vand.32 d4, d7, d3
128 vand.64 d4, d7, d3
130 vand.i8 d4, d7, d3
131 vand.i16 d4, d7, d3
132 vand.i32 d4, d7, d3
133 vand.i64 d4, d7, d3
135 vand.s8 d4, d7, d3
[all …]
Dneon-mul-encoding.s 106 vmul.i16 d0, d4[2]
110 vmul.s32 d4, d3[1]
111 vmul.u32 d5, d4[0]
114 vmul.i16 q0, d4[2]
119 vmul.u32 q5, d4[0]
122 vmul.i16 d9, d0, d4[2]
126 vmul.s32 d5, d4, d3[1]
127 vmul.u32 d4, d5, d4[0]
130 vmul.i16 q9, q0, d4[2]
135 vmul.u32 q4, q5, d4[0]
[all …]
Dneon-vld-encoding.s 12 vld1.16 {d4, d5, d6}, [r3:64]
15 vld1.8 {d1, d2, d3, d4}, [r3]
16 vld1.16 {d4, d5, d6, d7}, [r3:64]
39 vld1.16 {d4, d5, d6}, [r3:64]!
44 vld1.16 {d4, d5, d6}, [r3:64], r6
48 vld1.8 {d1, d2, d3, d4}, [r3]!
49 vld1.16 {d4, d5, d6, d7}, [r3:64]!
53 vld1.8 {d1, d2, d3, d4}, [r3], r8
54 vld1.16 {d4, d5, d6, d7}, [r3:64], r8
67 @ CHECK: vld1.16 {d4, d5, d6}, [r3:64] @ encoding: [0x5f,0x46,0x23,0xf4]
[all …]
Dneon-shift-encoding.s 382 vshl.s8 d4, d5
383 vshl.s16 d4, d5
384 vshl.s32 d4, d5
385 vshl.s64 d4, d5
387 vshl.u8 d4, d5
388 vshl.u16 d4, d5
389 vshl.u32 d4, d5
390 vshl.u64 d4, d5
402 @ CHECK: vshl.s8 d4, d4, d5 @ encoding: [0x04,0x44,0x05,0xf2]
403 @ CHECK: vshl.s16 d4, d4, d5 @ encoding: [0x04,0x44,0x15,0xf2]
[all …]
Dneon-vst-encoding.s 70 vst3.u32 {d2, d3, d4}, [r8], r3
71 vst3.8 {d4, d6, d8}, [r9], r4
90 @ CHECK: vst3.32 {d2, d3, d4}, [r8], r3 @ encoding: [0x83,0x24,0x08,0xf4]
91 @ CHECK: vst3.8 {d4, d6, d8}, [r9], r4 @ encoding: [0x04,0x45,0x09,0xf4]
157 vst2.16 {d2[1], d4[1]}, [r3], r5
174 @ CHECK: vst2.16 {d2[1], d4[1]}, [r3], r5 @ encoding: [0x65,0x25,0x83,0xf4]
186 vst3.u32 {d2[1], d3[1], d4[1]}, [r8], r3
203 @ CHECK: vst3.32 {d2[1], d3[1], d4[1]}, [r8], r3 @ encoding: [0x83,0x2a,0x88,0xf4]
263 @ CHECK: vst1.8 {d4, d5}, [r2] @ encoding: [0x0f,0x4a,0x02,0xf4]
264 @ CHECK: vst1.8 {d4, d5}, [r2] @ encoding: [0x0f,0x4a,0x02,0xf4]
[all …]
/external/kernel-headers/original/asm-generic/
Dxor.h 109 register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_2() local
114 d4 = p1[4]; in xor_32regs_2()
122 d4 ^= p2[4]; in xor_32regs_2()
130 p1[4] = d4; in xor_32regs_2()
146 register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_3() local
151 d4 = p1[4]; in xor_32regs_3()
159 d4 ^= p2[4]; in xor_32regs_3()
167 d4 ^= p3[4]; in xor_32regs_3()
175 p1[4] = d4; in xor_32regs_3()
192 register long d0, d1, d2, d3, d4, d5, d6, d7; in xor_32regs_4() local
[all …]
/external/valgrind/main/none/tests/arm/
Dneon64.c 700 TESTINSN_bin("vand d4, d6, d5", d4, d6, i8, 0xff, d5, i16, 0x57); in main()
706 TESTINSN_bin("vbic d4, d6, d5", d4, d6, i8, 0xff, d5, i16, 0x57); in main()
713 TESTINSN_bin("vorr d4, d4, d4", d4, d4, i16, 0xff, d4, i16, 0xff); in main()
719 TESTINSN_bin("vorn d4, d4, d4", d4, d4, i16, 0xff, d4, i16, 0xff); in main()
724 TESTINSN_bin("veor d4, d6, d5", d4, d6, i8, 0xff, d5, i16, 0x57); in main()
729 TESTINSN_bin("veor d4, d4, d4", d4, d4, i16, 0xff, d4, i16, 0xff); in main()
734 TESTINSN_bin("vbsl d4, d6, d5", d4, d6, i8, 0xff, d5, i16, 0x57); in main()
739 TESTINSN_bin("vbsl d4, d4, d4", d4, d4, i16, 0xff, d4, i16, 0xff); in main()
744 TESTINSN_bin("vbit d4, d6, d5", d4, d6, i8, 0xff, d5, i16, 0x57); in main()
749 TESTINSN_bin("vbit d4, d4, d4", d4, d4, i16, 0xff, d4, i16, 0xff); in main()
[all …]
/external/libvpx/libvpx/vp8/common/arm/neon/
Dsad8_neon.asm 36 vld1.8 {d4}, [r0], r1
44 vabal.u8 q12, d4, d12
56 vld1.8 {d4}, [r0], r1
64 vabal.u8 q12, d4, d12
93 vld1.8 {d4}, [r0], r1
101 vabal.u8 q12, d4, d12
113 vld1.8 {d4}, [r0], r1
121 vabal.u8 q12, d4, d12
133 vld1.8 {d4}, [r0], r1
141 vabal.u8 q12, d4, d12
[all …]
Ddequant_idct_neon.asm 44 vswp d3, d4 ;q2(vp[4] vp[12])
63 vqsub.s16 d4, d13, d10
66 vtrn.32 d2, d4
69 vtrn.16 d4, d5
74 vswp d3, d4
94 vqsub.s16 d4, d13, d10
101 vrshr.s16 d4, d4, #3
104 vtrn.32 d2, d4
107 vtrn.16 d4, d5
Ddc_only_idct_add_neon.asm 36 vld1.32 {d4[0]}, [r1], r2
37 vld1.32 {d4[1]}, [r1]
40 vaddw.u8 q2, q0, d4
43 vqmovun.s16 d4, q2
47 vst1.32 {d4[0]}, [r3], r12
48 vst1.32 {d4[1]}, [r3]
Didct_dequant_0_2x_neon.asm 31 vld1.32 {d4[0]}, [r2], r3
33 vld1.32 {d4[1]}, [r2], r3
54 vaddw.u8 q2, q0, d4
62 vqmovun.s16 d4, q2
70 vst1.32 {d4[0]}, [r2], r3
72 vst1.32 {d4[1]}, [r2]
Dsixtappredict8x4_neon.asm 70 vdup.8 d4, d26[0]
99 vmlsl.u8 q7, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
100 vmlsl.u8 q8, d29, d4
101 vmlsl.u8 q9, d30, d4
102 vmlsl.u8 q10, d31, d4
180 vmlsl.u8 q8, d27, d4 ;-(src_ptr[2] * vp8_filter[4])
181 vmlsl.u8 q9, d28, d4
182 vmlsl.u8 q10, d29, d4
183 vmlsl.u8 q11, d30, d4
184 vmlsl.u8 q12, d31, d4
[all …]
Dsixtappredict4x4_neon.asm 68 vdup.8 d4, d26[0]
109 vmlsl.u8 q7, d6, d4 ;-(src_ptr[2] * vp8_filter[4])
110 vmlsl.u8 q8, d10, d4
182 vmlsl.u8 q7, d6, d4 ;-(src_ptr[2] * vp8_filter[4])
183 vmlsl.u8 q8, d10, d4
184 vmlsl.u8 q12, d31, d4 ;-(src_ptr[2] * vp8_filter[4])
228 vdup.8 d4, d16[0]
237 vmlsl.u8 q3, d29, d4 ;-(src_ptr[2] * vp8_filter[4])
238 vmlsl.u8 q4, d30, d4
257 vqrshrun.s16 d4, q6, #7
[all …]
Dsixtappredict8x8_neon.asm 73 vdup.8 d4, d26[0]
103 vmlsl.u8 q7, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
104 vmlsl.u8 q8, d29, d4
105 vmlsl.u8 q9, d30, d4
106 vmlsl.u8 q10, d31, d4
193 vmlsl.u8 q8, d27, d4 ;-(src_ptr[2] * vp8_filter[4])
194 vmlsl.u8 q9, d28, d4
195 vmlsl.u8 q10, d29, d4
196 vmlsl.u8 q11, d30, d4
197 vmlsl.u8 q12, d31, d4
[all …]
Dsixtappredict16x16_neon.asm 76 vdup.8 d4, d26[0]
116 vmlsl.u8 q8, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
117 vmlsl.u8 q10, d29, d4
118 vmlsl.u8 q12, d30, d4
124 vmlsl.u8 q9, d28, d4 ;-(src_ptr[2] * vp8_filter[4])
125 vmlsl.u8 q11, d29, d4
126 vmlsl.u8 q13, d30, d4
215 vdup.8 d4, d16[0]
242 vmlsl.u8 q3, d22, d4 ;-(src_ptr[2] * vp8_filter[4])
243 vmlsl.u8 q4, d23, d4
[all …]
Dsad16_neon.asm 45 vabal.u8 q12, d4, d12
70 vabal.u8 q12, d4, d12
95 vabal.u8 q12, d4, d12
120 vabal.u8 q12, d4, d12
164 vabal.u8 q12, d4, d12
188 vabal.u8 q12, d4, d12
Dvp8_subpixelvariance16x16s_neon.asm 45 vld1.u8 {d4, d5, d6, d7}, [r0], r1
70 vsubl.u8 q0, d4, d26
98 vmlal.s16 q9, d4, d4
161 vsubl.u8 q13, d4, d6
191 vmlal.s16 q9, d4, d4
245 vld1.u8 {d4, d5, d6, d7}, [r0], r1
278 vsubl.u8 q0, d4, d14 ;diff
372 vld1.u8 {d4, d5, d6, d7}, [r0], r1
399 vst1.u8 {d4, d5, d6, d7}, [r3]!
414 vld1.u8 {d4, d5, d6, d7}, [r0], r1
[all …]
Dbilinearpredict8x8_neon.asm 47 vmull.u8 q7, d4, d0
52 vext.8 d5, d4, d5, #1
74 vmull.u8 q7, d4, d0
80 vext.8 d5, d4, d5, #1
131 vqrshrn.u16 d4, q3, #7
140 vst1.u8 {d4}, [r1], lr
/external/libvpx/libvpx/vp8/encoder/arm/neon/
Dshortfdct_neon.asm 50 vadd.s16 d4, d0, d3 ; a1 = ip[0] + ip[3]
58 vadd.s16 d0, d4, d5 ; op[0] = a1 + b1
59 vsub.s16 d2, d4, d5 ; op[2] = a1 - b1
80 vadd.s16 d4, d0, d3 ; a1 = ip[0] + ip[12]
83 vadd.s16 d4, d4, d26 ; a1 + 7
86 vadd.s16 d0, d4, d5 ; op[0] = a1 + b1 + 7
87 vsub.s16 d2, d4, d5 ; op[8] = a1 - b1 + 7
92 vceq.s16 d4, d7, #0
100 vmvn.s16 d4, d4
102 vsub.s16 d1, d1, d4 ; op[4] += (d1!=0)
[all …]
Dvp8_shortwalsh4x4_neon.asm 40 vadd.s16 d4, d0, d2 ; ip[0] + ip[2]
45 vshl.s16 d4, d4, #2 ; a1 = (ip[0] + ip[2]) << 2
48 vceq.s16 d16, d4, #0 ; a1 == 0
51 vadd.s16 d0, d4, d5 ; a1 + d1
53 vsub.s16 d3, d4, d5 ; op[3] = a1 - d1
Dsubtract_neon.asm 40 vld1.8 {d4}, [r3], r6
47 vsubl.u8 q12, d4, d5
85 vsubl.u8 q10, d4, d6
127 vld1.8 {d4}, [r1], r3
142 vsubl.u8 q10, d4, d5
163 vld1.8 {d4}, [r2], r3
178 vsubl.u8 q10, d4, d5
/external/valgrind/main/massif/tests/
Dalloc-fns.c 18 void d4(int n) { malloc(n); } in d4() function
19 void d3(int n) { d4(n); } in d3()
33 d4(400); in main()
/external/llvm/test/CodeGen/Mips/
Dsitofp-selectcc-opt.ll 3 @foo12.d4 = internal unnamed_addr global double 0.000000e+00, align 8
17 %1 = load double* @foo12.d4, align 8
19 store double %add, double* @foo12.d4, align 8
/external/openssh/
Dumac.c 339 UINT32 d0,d1,d2,d3,d4,d5,d6,d7; in nh_aux() local
346 d4 = LOAD_UINT32_LITTLE(d+4); d5 = LOAD_UINT32_LITTLE(d+5); in nh_aux()
350 h += MUL64((k0 + d0), (k4 + d4)); in nh_aux()
372 UINT32 d0,d1,d2,d3,d4,d5,d6,d7; in nh_aux() local
382 d4 = LOAD_UINT32_LITTLE(d+4); d5 = LOAD_UINT32_LITTLE(d+5); in nh_aux()
387 h1 += MUL64((k0 + d0), (k4 + d4)); in nh_aux()
388 h2 += MUL64((k4 + d0), (k8 + d4)); in nh_aux()
419 UINT32 d0,d1,d2,d3,d4,d5,d6,d7; in nh_aux() local
431 d4 = LOAD_UINT32_LITTLE(d+4); d5 = LOAD_UINT32_LITTLE(d+5); in nh_aux()
436 h1 += MUL64((k0 + d0), (k4 + d4)); in nh_aux()
[all …]
/external/valgrind/main/none/tests/s390x/
Dopcodes.h 41 #define RIS_RURDI(op1,r1,m3,b4,d4,i2,op2) \ argument
43 ".long 0x" #b4 #d4 #i2 #op2 "\n\t"
47 #define RRS(op1,r1,r2,b4,d4,m3,u0,op2) \ argument
49 ".long 0x" #b4 #d4 #m3 #u0 #op2 "\n\t"
58 #define RIS_RURDU(op1,r1,m3,b4,d4,i2,op2) \ argument
60 ".long 0x" #b4 #d4 #i2 #op2 "\n\t"
130 #define CGIB(r1,m3,b4,d4,i2) RIS_RURDI(ec,r1,m3,b4,d4,i2,fc) argument
132 #define CGRB(r1,r2,b4,d4,m3) RRS(ec,r1,r2,b4,d4,m3,0,e4) argument
142 #define CIB(r1,m3,b4,d4,i2) RIS_RURDI(ec,r1,m3,b4,d4,i2,fe) argument
151 #define CLGIB(r1,m3,b4,d4,i2) RIS_RURDU(ec,r1,m3,b4,d4,i2,fd) argument
[all …]

123456