/external/llvm/test/MC/ARM/

neon-vld-vst-align.s
      5  vld1.8 {d0}, [r4]
      6  vld1.8 {d0}, [r4:16]
      7  vld1.8 {d0}, [r4:32]
      8  vld1.8 {d0}, [r4:64]
      9  vld1.8 {d0}, [r4:128]
     10  vld1.8 {d0}, [r4:256]
     12  @ CHECK: vld1.8 {d0}, [r4] @ encoding: [0x24,0xf9,0x0f,0x07]
     14  @ CHECK-ERRORS: vld1.8 {d0}, [r4:16]
     17  @ CHECK-ERRORS: vld1.8 {d0}, [r4:32]
     19  @ CHECK: vld1.8 {d0}, [r4:64] @ encoding: [0x24,0xf9,0x1f,0x07]
    [all …]

vfp-aliases.s
      8  fstmfdd sp!, {d0}
      9  fstmead sp!, {d0}
     10  fstmdbd sp!, {d0}
     11  fstmiad sp!, {d0}
     21  fldmiad sp!, {d0}
     22  fldmdbd sp!, {d0}
     23  fldmead sp!, {d0}
     24  fldmfdd sp!, {d0}
     26  fstmeax sp!, {d0}
     27  fldmfdx sp!, {d0}
    [all …]

directive-arch_extension-fp.s
     35  vselgt.f64 d0, d0, d0
     37  vselge.f64 d0, d0, d0
     39  vseleq.f64 d0, d0, d0
     41  vselvs.f64 d0, d0, d0
     43  vmaxnm.f64 d0, d0, d0
     45  vminnm.f64 d0, d0, d0
     48  vcvtb.f64.f16 d0, s0
     50  vcvtb.f16.f64 s0, d0
     52  vcvtt.f64.f16 d0, s0
     54  vcvtt.f16.f64 s0, d0
    [all …]

fullfp16-neon.s
      4  vadd.f16 d0, d1, d2
      6  @ ARM: vadd.f16 d0, d1, d2 @ encoding: [0x02,0x0d,0x11,0xf2]
      8  @ THUMB: vadd.f16 d0, d1, d2 @ encoding: [0x11,0xef,0x02,0x0d]
     11  vsub.f16 d0, d1, d2
     13  @ ARM: vsub.f16 d0, d1, d2 @ encoding: [0x02,0x0d,0x31,0xf2]
     15  @ THUMB: vsub.f16 d0, d1, d2 @ encoding: [0x31,0xef,0x02,0x0d]
     18  vmul.f16 d0, d1, d2
     20  @ ARM: vmul.f16 d0, d1, d2 @ encoding: [0x12,0x0d,0x11,0xf3]
     22  @ THUMB: vmul.f16 d0, d1, d2 @ encoding: [0x11,0xff,0x12,0x0d]
     32  vmla.f16 d0, d1, d2
    [all …]

directive-arch_extension-simd.s
     24  vmaxnm.f64 d0, d0, d0
     26  vminnm.f64 d0, d0, d0
     33  vcvta.s32.f64 s0, d0
     35  vcvta.u32.f64 s0, d0
     41  vcvtn.s32.f64 s0, d0
     43  vcvtn.u32.f64 s0, d0
     49  vcvtp.s32.f64 s0, d0
     51  vcvtp.u32.f64 s0, d0
     57  vcvtm.s32.f64 s0, d0
     59  vcvtm.u32.f64 s0, d0
    [all …]

fullfp16-neon-neg.s
      6  vadd.f16 d0, d1, d2
     11  vsub.f16 d0, d1, d2
     16  vmul.f16 d0, d1, d2
     26  vmla.f16 d0, d1, d2
     36  vmls.f16 d0, d1, d2
     46  vfma.f16 d0, d1, d2
     51  vfms.f16 d0, d1, d2
    106  vacge.f16 d0, d1, d2
    111  vacgt.f16 d0, d1, d2
    116  vacle.f16 d0, d1, d2
    [all …]

vfp-aliases-diagnostics.s
     13  fstmfds sp!, {d0}
     14  fstmeas sp!, {d0}
     15  fstmdbs sp!, {d0}
     16  fstmias sp!, {d0}
     18  fldmias sp!, {d0}
     19  fldmdbs sp!, {d0}
     20  fldmeas sp!, {d0}
     21  fldmfds sp!, {d0}
     47  @ CHECK: fstmfds sp!, {d0}
     50  @ CHECK: fstmeas sp!, {d0}
    [all …]
/external/valgrind/none/tests/arm/

neon64.c
    642  TESTINSN_imm("vmov.i32 d0", d0, 0x7); in main()
    652  TESTINSN_imm("vmov.f32 d0", d0, 0.328125); in main()
    653  TESTINSN_imm("vmov.f32 d0", d0, -0.328125); in main()
    657  TESTINSN_imm("vmvn.i32 d0", d0, 0x7); in main()
    670  TESTINSN_imm("vorr.i32 d0", d0, 0x7); in main()
    679  TESTINSN_imm("vbic.i32 d0", d0, 0x7); in main()
    688  TESTINSN_un("vmvn d0, d1", d0, d1, i32, 24); in main()
    690  TESTINSN_un("vmvn d0, d14", d0, d14, i32, 24); in main()
    694  TESTINSN_un("vmov d0, d1", d0, d1, i32, 24); in main()
    696  TESTINSN_un("vmov d0, d14", d0, d14, i32, 24); in main()
    [all …]
/external/libavc/common/arm/

ih264_inter_pred_luma_horz_hpel_vert_qpel_a9q.s
    152  vext.8 d5, d0, d1, #5
    153  vaddl.u8 q3, d0, d5
    155  vext.8 d2, d0, d1, #2
    156  vext.8 d3, d0, d1, #3
    158  vext.8 d4, d0, d1, #4
    160  vext.8 d1, d0, d1, #1
    164  vext.8 d5, d0, d1, #5
    165  vaddl.u8 q4, d0, d5
    166  vext.8 d2, d0, d1, #2
    167  vext.8 d3, d0, d1, #3
    [all …]
/external/libmpeg2/common/arm/

impeg2_mem_func.s
    103  vdup.8 d0, r1 @//r1 is the 8-bit value to be set into
    105  vst1.8 {d0}, [r0], r2 @//Store the row 1
    106  vst1.8 {d0}, [r0], r2 @//Store the row 2
    107  vst1.8 {d0}, [r0], r2 @//Store the row 3
    108  vst1.8 {d0}, [r0], r2 @//Store the row 4
    109  vst1.8 {d0}, [r0], r2 @//Store the row 5
    110  vst1.8 {d0}, [r0], r2 @//Store the row 6
    111  vst1.8 {d0}, [r0], r2 @//Store the row 7
    112  vst1.8 {d0}, [r0], r2 @//Store the row 8
    155  vst1.16 {d0, d1} , [r0]! @row1
    [all …]

ideint_cac_a9.s
    150  vadd.u32 d21, d0, d1
    160  vabd.u8 d0, d0, d1
    166  vcge.u8 d1, d0, d9
    167  vand.u8 d0, d0, d1
    168  @ d0 now contains 8 absolute diff of sums above the threshold
    171  vpaddl.u8 d0, d0
    172  vshl.u16 d0, d0, #2
    175  vadd.u16 d20, d0, d20
    181  vrhadd.u8 d0, d28, d29
    183  vrhadd.u8 d0, d0, d2
    [all …]

impeg2_inter_pred.s
    109  vld1.8 {d0, d1}, [r4], r2 @Load and increment src
    110  vst1.8 {d0, d1}, [r5], r3 @Store and increment dst
    113  vld1.8 {d0, d1}, [r4], r2 @Load and increment src
    114  vst1.8 {d0, d1}, [r5], r3 @Store and increment dst
    115  vld1.8 {d0, d1}, [r4], r2 @Load and increment src
    116  vst1.8 {d0, d1}, [r5], r3 @Store and increment dst
    117  vld1.8 {d0, d1}, [r4], r2 @Load and increment src
    118  vst1.8 {d0, d1}, [r5], r3 @Store and increment dst
    119  vld1.8 {d0, d1}, [r4], r2 @Load and increment src
    120  vst1.8 {d0, d1}, [r5], r3 @Store and increment dst
    [all …]
/external/libavc/encoder/arm/

ih264e_half_pel.s
     88  vmov.i8 d0, #5
    145  vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row0)
    147  vmlsl.u8 q5, d30, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column2,row0)
    149  vmlsl.u8 q6, d29, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column3,row0)
    151  vmlsl.u8 q7, d28, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column1,row1)
    154  vmlsl.u8 q8, d27, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column2,row1)
    156  vmlsl.u8 q9, d26, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 (column3,row1)
    158  vmlsl.u8 q4, d31, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column1,row0)
    160  vmlsl.u8 q5, d30, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column2,row0)
    162  vmlsl.u8 q6, d29, d0 @// a0 + a5 + 20a2 + 20a3 - 5a1 - 5a4 (column3,row0)
    [all …]
/external/llvm/test/MC/Disassembler/ARM/

neont-VLD-reencoding.txt
     12  # CHECK: vld1.8 {d0[0]}, [r0], r0 @ encoding: [0xa0,0xf9,0x00,0x00]
     13  # CHECK: vld1.8 {d0[1]}, [r0], r0 @ encoding: [0xa0,0xf9,0x20,0x00]
     14  # CHECK: vld1.8 {d0[2]}, [r0], r0 @ encoding: [0xa0,0xf9,0x40,0x00]
     15  # CHECK: vld1.8 {d0[3]}, [r0], r0 @ encoding: [0xa0,0xf9,0x60,0x00]
     16  # CHECK: vld1.8 {d0[4]}, [r0], r0 @ encoding: [0xa0,0xf9,0x80,0x00]
     17  # CHECK: vld1.8 {d0[5]}, [r0], r0 @ encoding: [0xa0,0xf9,0xa0,0x00]
     18  # CHECK: vld1.8 {d0[6]}, [r0], r0 @ encoding: [0xa0,0xf9,0xc0,0x00]
     19  # CHECK: vld1.8 {d0[7]}, [r0], r0 @ encoding: [0xa0,0xf9,0xe0,0x00]
     30  # CHECK: vld1.16 {d0[0]}, [r0], r0 @ encoding: [0xa0,0xf9,0x00,0x04]
     31  # CHECK: vld1.16 {d0[0]}, [r0:16], r0 @ encoding: [0xa0,0xf9,0x10,0x04]
    [all …]

fullfp16-neon-arm.txt
      4  # CHECK: vadd.f16 d0, d1, d2
      9  # CHECK: vsub.f16 d0, d1, d2
     14  # CHECK: vmul.f16 d0, d1, d2
     24  # CHECK: vmla.f16 d0, d1, d2
     34  # CHECK: vmls.f16 d0, d1, d2
     44  # CHECK: vfma.f16 d0, d1, d2
     49  # CHECK: vfms.f16 d0, d1, d2
     94  # CHECK: vacge.f16 d0, d1, d2
     99  # CHECK: vacgt.f16 d0, d1, d2
    104  # CHECK: vabd.f16 d0, d1, d2
    [all …]

fullfp16-neon-thumb.txt
      4  # CHECK: vadd.f16 d0, d1, d2
      9  # CHECK: vsub.f16 d0, d1, d2
     14  # CHECK: vmul.f16 d0, d1, d2
     24  # CHECK: vmla.f16 d0, d1, d2
     34  # CHECK: vmls.f16 d0, d1, d2
     44  # CHECK: vfma.f16 d0, d1, d2
     49  # CHECK: vfms.f16 d0, d1, d2
     94  # CHECK: vacge.f16 d0, d1, d2
     99  # CHECK: vacgt.f16 d0, d1, d2
    104  # CHECK: vabd.f16 d0, d1, d2
    [all …]
/external/v8/test/unittests/compiler/

diamond-unittest.cc
     38  Diamond d0(graph(), common(), p0); in TEST_F() local
     40  d1.Chain(d0); in TEST_F()
     41  EXPECT_THAT(d1.branch, IsBranch(p1, d0.merge)); in TEST_F()
     42  EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start())); in TEST_F()
     74  Diamond d0(graph(), common(), p0); in TEST_F() local
     77  d1.Nest(d0, true); in TEST_F()
     79  EXPECT_THAT(d0.branch, IsBranch(p0, graph()->start())); in TEST_F()
     80  EXPECT_THAT(d0.if_true, IsIfTrue(d0.branch)); in TEST_F()
     81  EXPECT_THAT(d0.if_false, IsIfFalse(d0.branch)); in TEST_F()
     82  EXPECT_THAT(d0.merge, IsMerge(d1.merge, d0.if_false)); in TEST_F()
    [all …]
/external/libjpeg-turbo/

jdcol565.c
    114  INT32 d0 = dither_matrix[cinfo->output_scanline & DITHER_MASK]; in LOCAL() local
    130  r = range_limit[DITHER_565_R(y + Crrtab[cr], d0)]; in LOCAL()
    133  SCALEBITS)), d0)]; in LOCAL()
    134  b = range_limit[DITHER_565_B(y + Cbbtab[cb], d0)]; in LOCAL()
    144  r = range_limit[DITHER_565_R(y + Crrtab[cr], d0)]; in LOCAL()
    147  SCALEBITS)), d0)]; in LOCAL()
    148  b = range_limit[DITHER_565_B(y + Cbbtab[cb], d0)]; in LOCAL()
    149  d0 = DITHER_ROTATE(d0); in LOCAL()
    155  r = range_limit[DITHER_565_R(y + Crrtab[cr], d0)]; in LOCAL()
    158  SCALEBITS)), d0)]; in LOCAL()
    [all …]
/external/libvpx/libvpx/vp9/common/x86/

vp9_idct_intrin_sse2.c
     55  __m128i d0 = _mm_cvtsi32_si128(*(const int *)(dest)); in vp9_iht4x4_16_add_sse2() local
     57  d0 = _mm_unpacklo_epi32(d0, in vp9_iht4x4_16_add_sse2()
     61  d0 = _mm_unpacklo_epi8(d0, zero); in vp9_iht4x4_16_add_sse2()
     63  d0 = _mm_add_epi16(d0, in[0]); in vp9_iht4x4_16_add_sse2()
     65  d0 = _mm_packus_epi16(d0, d2); in vp9_iht4x4_16_add_sse2()
     67  *(int *)dest = _mm_cvtsi128_si32(d0); in vp9_iht4x4_16_add_sse2()
     69  d0 = _mm_srli_si128(d0, 4); in vp9_iht4x4_16_add_sse2()
     70  *(int *)(dest + stride) = _mm_cvtsi128_si32(d0); in vp9_iht4x4_16_add_sse2()
     72  d0 = _mm_srli_si128(d0, 4); in vp9_iht4x4_16_add_sse2()
     73  *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d0); in vp9_iht4x4_16_add_sse2()
    [all …]
/external/libhevc/common/arm/

ihevc_intra_pred_luma_horz.s
    148  vdup.8 q1,d0[7]
    152  vdup.8 q2,d0[6]
    156  vdup.8 q3,d0[5]
    160  vdup.8 q4,d0[4]
    164  vdup.8 q1,d0[3]
    168  vdup.8 q2,d0[2]
    172  vdup.8 q3,d0[1]
    177  vdup.8 q4,d0[0]
    224  vdup.8 q8,d0[7]
    236  vdup.8 q1,d0[6]
    [all …]

ihevc_itrans_recon_16x16.s
    147  vld1.16 {d0,d1,d2,d3},[r14] @//d0,d1 are used for storing the constant data
    205  @d0[0]= 64 d2[0]=64
    206  @d0[1]= 90 d2[1]=57
    207  @d0[2]= 89 d2[2]=50
    208  @d0[3]= 87 d2[3]=43
    242  vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
    243  vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
    247  vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
    257  vmull.s16 q6,d10,d0[0]
    258  vmlal.s16 q6,d11,d0[2]
    [all …]

ihevc_intra_pred_chroma_horz.s
    134  vdup.16 q1,d0[3]
    138  vdup.16 q2,d0[2]
    142  vdup.16 q3,d0[1]
    146  vdup.16 q4,d0[0]
    215  vdup.16 q4,d0[3]
    220  vdup.16 q5,d0[2]
    223  vdup.16 q6,d0[1]
    226  vdup.16 q7,d0[0]
    229  vdup.16 q8,d0[3]
    241  @vdup.8 q1,d0[2]
    [all …]

ihevc_itrans_recon_8x8.s
    172  vld1.16 {d0,d1},[r14] @//d0,d1 are used for storing the constant data
    187  vmull.s16 q10,d2,d0[0] @// y0 * cos4(part of c0 and c1)
    192  vmull.s16 q12,d6,d0[1] @// y1 * cos1(part of b0)
    194  vmull.s16 q13,d6,d0[3] @// y1 * cos3(part of b1)
    200  vmlal.s16 q12,d7,d0[3] @// y1 * cos1 + y3 * cos3(part of b0)
    204  vmlsl.s16 q14,d7,d0[1] @// y1 * sin3 - y3 * cos1(part of b2)
    208  vmull.s16 q11,d10,d0[0] @// y4 * cos4(part of c0 and c1)
    210  vmull.s16 q3,d3,d0[2] @// y2 * cos2(part of d0)
    235  vmlsl.s16 q13,d14,d0[1] @// y1 * cos3 - y3 * sin1 - y5 * cos1(part of b1)
    237  vmlal.s16 q15,d14,d0[3] @// y1 * sin1 - y3 * sin3 + y5 * cos3(part of b3)
    [all …]
/external/libvpx/libvpx/vpx_dsp/arm/

intrapred_neon_asm.asm
     38  vld1.32 {d0[0]}, [r2]
     39  vst1.32 {d0[0]}, [r0], r1
     40  vst1.32 {d0[0]}, [r0], r1
     41  vst1.32 {d0[0]}, [r0], r1
     42  vst1.32 {d0[0]}, [r0], r1
     55  vld1.8 {d0}, [r2]
     56  vst1.8 {d0}, [r0], r1
     57  vst1.8 {d0}, [r0], r1
     58  vst1.8 {d0}, [r0], r1
     59  vst1.8 {d0}, [r0], r1
    [all …]
/external/libpng/arm/

filter_neon.S
     70  vadd.u8 d0, d3, d4
     71  vadd.u8 d1, d0, d5
     74  vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
     90  vadd.u8 d0, d3, d22
     92  vadd.u8 d1, d0, d5
     95  vst1.32 {d0[0]}, [r1,:32], r2
    126  vhadd.u8 d0, d3, d16
    127  vadd.u8 d0, d0, d4
    128  vhadd.u8 d1, d0, d17
    134  vst4.32 {d0[0],d1[0],d2[0],d3[0]},[r1,:128]!
    [all …]