/external/llvm-project/llvm/test/MC/ARM/ |
D | directive-arch_extension-fp.s |
    22  vselgt.f32 s0, s0, s0
    24  vselge.f32 s0, s0, s0
    26  vseleq.f32 s0, s0, s0
    28  vselvs.f32 s0, s0, s0
    30  vmaxnm.f32 s0, s0, s0
    32  vminnm.f32 s0, s0, s0
    48  vcvtb.f64.f16 d0, s0
    50  vcvtb.f16.f64 s0, d0
    52  vcvtt.f64.f16 d0, s0
    54  vcvtt.f16.f64 s0, d0
    [all …]
|
D | fullfp16.s |
     6  vadd.f16 s0, s1, s0
     7  @ ARM: vadd.f16 s0, s1, s0 @ encoding: [0x80,0x09,0x30,0xee]
     8  @ THUMB: vadd.f16 s0, s1, s0 @ encoding: [0x30,0xee,0x80,0x09]
    10  vsub.f16 s0, s1, s0
    11  @ ARM: vsub.f16 s0, s1, s0 @ encoding: [0xc0,0x09,0x30,0xee]
    12  @ THUMB: vsub.f16 s0, s1, s0 @ encoding: [0x30,0xee,0xc0,0x09]
    14  vdiv.f16 s0, s1, s0
    15  @ ARM: vdiv.f16 s0, s1, s0 @ encoding: [0x80,0x09,0x80,0xee]
    16  @ THUMB: vdiv.f16 s0, s1, s0 @ encoding: [0x80,0xee,0x80,0x09]
    18  vmul.f16 s0, s1, s0
    [all …]
|
D | directive-arch_extension-simd.s |
    19  vmaxnm.f32 s0, s0, s0
    21  vminnm.f32 s0, s0, s0
    29  vcvta.s32.f32 s0, s0
    31  vcvta.u32.f32 s0, s0
    33  vcvta.s32.f64 s0, d0
    35  vcvta.u32.f64 s0, d0
    37  vcvtn.s32.f32 s0, s0
    39  vcvtn.u32.f32 s0, s0
    41  vcvtn.s32.f64 s0, d0
    43  vcvtn.u32.f64 s0, d0
    [all …]
|
/external/llvm/test/MC/ARM/ |
D | fullfp16.s |
     4  vadd.f16 s0, s1, s0
     5  @ ARM: vadd.f16 s0, s1, s0 @ encoding: [0x80,0x09,0x30,0xee]
     6  @ THUMB: vadd.f16 s0, s1, s0 @ encoding: [0x30,0xee,0x80,0x09]
     8  vsub.f16 s0, s1, s0
     9  @ ARM: vsub.f16 s0, s1, s0 @ encoding: [0xc0,0x09,0x30,0xee]
    10  @ THUMB: vsub.f16 s0, s1, s0 @ encoding: [0x30,0xee,0xc0,0x09]
    12  vdiv.f16 s0, s1, s0
    13  @ ARM: vdiv.f16 s0, s1, s0 @ encoding: [0x80,0x09,0x80,0xee]
    14  @ THUMB: vdiv.f16 s0, s1, s0 @ encoding: [0x80,0xee,0x80,0x09]
    16  vmul.f16 s0, s1, s0
    [all …]
|
D | directive-arch_extension-fp.s |
    22  vselgt.f32 s0, s0, s0
    24  vselge.f32 s0, s0, s0
    26  vseleq.f32 s0, s0, s0
    28  vselvs.f32 s0, s0, s0
    30  vmaxnm.f32 s0, s0, s0
    32  vminnm.f32 s0, s0, s0
    48  vcvtb.f64.f16 d0, s0
    50  vcvtb.f16.f64 s0, d0
    52  vcvtt.f64.f16 d0, s0
    54  vcvtt.f16.f64 s0, d0
    [all …]
|
D | directive-arch_extension-simd.s |
    19  vmaxnm.f32 s0, s0, s0
    21  vminnm.f32 s0, s0, s0
    29  vcvta.s32.f32 s0, s0
    31  vcvta.u32.f32 s0, s0
    33  vcvta.s32.f64 s0, d0
    35  vcvta.u32.f64 s0, d0
    37  vcvtn.s32.f32 s0, s0
    39  vcvtn.u32.f32 s0, s0
    41  vcvtn.s32.f64 s0, d0
    43  vcvtn.u32.f64 s0, d0
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/arm/ |
D | sum_squares_neon.c | (matches in vpx_sum_squares_2d_i16_neon)
    21    int32x4_t s0;
    28    s0 = vmull_s16(s[0], s[0]);
    29    s0 = vmlal_s16(s0, s[1], s[1]);
    30    s0 = vmlal_s16(s0, s[2], s[2]);
    31    s0 = vmlal_s16(s0, s[3], s[3]);
    32    s1 = vpadd_u32(vget_low_u32(vreinterpretq_u32_s32(s0)),
    33                   vget_high_u32(vreinterpretq_u32_s32(s0)));
    41    int32x4_t s0 = vdupq_n_s32(0);
    55    s0 = vmlal_s16(s0, vget_low_s16(s[0]), vget_low_s16(s[0]));
    56    s0 = vmlal_s16(s0, vget_low_s16(s[1]), vget_low_s16(s[1]));
    [all …]
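The NEON kernel above is a vectorized sum of squares over a square block of int16 samples, accumulating with vmull_s16/vmlal_s16 and reducing with vpadd_u32. A minimal scalar sketch of the same computation (the signature is modeled on libvpx's vpx_sum_squares_2d_i16 and should be treated as an assumption):

```c
#include <stdint.h>

/* Scalar reference for what the NEON code computes: the sum of x*x over a
 * size x size block of int16_t samples with row stride `stride`.
 * Hypothetical helper name; sketch only. */
static uint64_t sum_squares_2d_i16_ref(const int16_t *src, int stride,
                                       int size) {
  uint64_t sum = 0;
  for (int r = 0; r < size; ++r)
    for (int c = 0; c < size; ++c)
      sum += (int64_t)src[r * stride + c] * src[r * stride + c];
  return sum;
}
```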
|
/external/llvm-project/llvm/test/CodeGen/VE/Scalar/ |
D | ctlz.ll |
    14  ; CHECK-NEXT: ldz %s0, %s0
    15  ; CHECK-NEXT: lea %s0, 64(, %s0)
    16  ; CHECK-NEXT: cmov.l.ne %s0, %s1, %s2
    26  ; CHECK-NEXT: ldz %s0, %s0
    35  ; CHECK-NEXT: and %s0, %s0, (32)0
    36  ; CHECK-NEXT: ldz %s0, %s0
    37  ; CHECK-NEXT: lea %s0, -32(, %s0)
    38  ; CHECK-NEXT: and %s0, %s0, (32)0
    47  ; CHECK-NEXT: ldz %s0, %s0
    48  ; CHECK-NEXT: lea %s0, -32(, %s0)
    [all …]
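These CHECK lines exercise VE's 64-bit leading-zero instruction (ldz): a 32-bit ctlz is lowered by masking to the low 32 bits (`and %s0, %s0, (32)0`), counting leading zeros in the full 64-bit register, then subtracting the 32 extra zeros (`lea %s0, -32(, %s0)`). A hedged C sketch of that pattern, using the GCC/Clang builtin __builtin_clzll as a stand-in for ldz:

```c
#include <stdint.h>

/* ctlz of a 32-bit value via a 64-bit leading-zero count, mirroring the
 * and/ldz/lea sequence above. Zero-extending x adds exactly 32 leading
 * zeros, which the subtraction removes. Requires x != 0 (the builtin is
 * undefined for 0). Sketch only. */
static int ctlz32_via_ldz64(uint32_t x) {
  return __builtin_clzll((uint64_t)x) - 32;
}
```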
|
D | br_jt.ll |
    13  ; CHECK-NEXT: and %s0, %s0, (32)0
    14  ; CHECK-NEXT: breq.w 1, %s0, .LBB{{[0-9]+}}_1
    16  ; CHECK-NEXT: breq.w 4, %s0, .LBB{{[0-9]+}}_5
    18  ; CHECK-NEXT: brne.w 2, %s0, .LBB{{[0-9]+}}_6
    20  ; CHECK-NEXT: or %s0, 0, (0)1
    21  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    24  ; CHECK-NEXT: or %s0, 3, (0)1
    25  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    28  ; CHECK-NEXT: or %s0, 7, (0)1
    30  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    [all …]
|
D | select_cc.ll |
    12  ; CHECK-NEXT: xor %s0, %s0, %s1
    13  ; CHECK-NEXT: cmov.w.ne %s2, %s3, %s0
    14  ; CHECK-NEXT: adds.w.zx %s0, %s2, (0)1
    25  ; CHECK-NEXT: cmps.w.sx %s0, %s0, %s1
    26  ; CHECK-NEXT: cmov.w.eq %s3, %s2, %s0
    27  ; CHECK-NEXT: adds.w.zx %s0, %s3, (0)1
    38  ; CHECK-NEXT: cmps.w.sx %s0, %s0, %s1
    39  ; CHECK-NEXT: cmov.w.eq %s3, %s2, %s0
    40  ; CHECK-NEXT: adds.w.zx %s0, %s3, (0)1
    51  ; CHECK-NEXT: cmps.w.sx %s0, %s0, %s1
    [all …]
|
D | addition.ll |
     6  ; CHECK-NEXT: adds.w.sx %s0, %s1, %s0
     7  ; CHECK-NEXT: sll %s0, %s0, 56
     8  ; CHECK-NEXT: sra.l %s0, %s0, 56
    17  ; CHECK-NEXT: adds.w.sx %s0, %s1, %s0
    18  ; CHECK-NEXT: sll %s0, %s0, 48
    19  ; CHECK-NEXT: sra.l %s0, %s0, 48
    28  ; CHECK-NEXT: adds.w.sx %s0, %s1, %s0
    29  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    38  ; CHECK-NEXT: adds.l %s0, %s1, %s0
    48  ; CHECK-NEXT: adds.l %s0, %s2, %s0
    [all …]
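The `sll`/`sra.l` pairs in the i8 and i16 cases are shift-based sign extension: the 64-bit sum is shifted left so the narrow result's sign bit lands in bit 63, then arithmetic-shifted back down. A C sketch of the i8 case (sketch only; right-shifting a negative int64_t is implementation-defined in C, though arithmetic on mainstream compilers):

```c
#include <stdint.h>

/* Keep the low 8 bits of x and sign-extend them to 64 bits, mirroring
 * `sll ..., 56` followed by `sra.l ..., 56` in the CHECK lines above.
 * The i16 case is the same with a shift amount of 48. */
static int64_t sext8_via_shifts(int64_t x) {
  return (int64_t)((uint64_t)x << 56) >> 56;
}
```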
|
D | subtraction.ll |
     6  ; CHECK-NEXT: subs.w.sx %s0, %s0, %s1
     7  ; CHECK-NEXT: sll %s0, %s0, 56
     8  ; CHECK-NEXT: sra.l %s0, %s0, 56
    17  ; CHECK-NEXT: subs.w.sx %s0, %s0, %s1
    18  ; CHECK-NEXT: sll %s0, %s0, 48
    19  ; CHECK-NEXT: sra.l %s0, %s0, 48
    28  ; CHECK-NEXT: subs.w.sx %s0, %s0, %s1
    29  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    38  ; CHECK-NEXT: subs.l %s0, %s0, %s1
    48  ; CHECK-NEXT: cmpu.l %s3, %s0, %s2
    [all …]
|
D | right_shift.ll |
     6  ; CHECK-NEXT: sra.w.sx %s0, %s0, %s1
     7  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    19  ; CHECK-NEXT: sra.w.sx %s0, %s0, %s1
    20  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    32  ; CHECK-NEXT: sra.w.sx %s0, %s0, %s1
    41  ; CHECK-NEXT: sra.l %s0, %s0, %s1
    50  ; CHECK-NEXT: and %s0, %s0, (32)0
    51  ; CHECK-NEXT: srl %s0, %s0, %s1
    52  ; CHECK-NEXT: adds.w.zx %s0, %s0, (0)1
    64  ; CHECK-NEXT: and %s0, %s0, (32)0
    [all …]
|
D | load_off.ll |
    16  ; CHECK-NEXT: lea %s0, bufi8@lo
    17  ; CHECK-NEXT: and %s0, %s0, (32)0
    18  ; CHECK-NEXT: lea.sl %s0, bufi8@hi(, %s0)
    19  ; CHECK-NEXT: ld1b.sx %s0, 2(, %s0)
    30  ; CHECK-NEXT: lea %s0, bufi16@lo
    31  ; CHECK-NEXT: and %s0, %s0, (32)0
    32  ; CHECK-NEXT: lea.sl %s0, bufi16@hi(, %s0)
    33  ; CHECK-NEXT: ld2b.sx %s0, 4(, %s0)
    44  ; CHECK-NEXT: lea %s0, bufi32@lo
    45  ; CHECK-NEXT: and %s0, %s0, (32)0
    [all …]
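The recurring `lea` / `and ..., (32)0` / `lea.sl` triple materializes a 64-bit symbol address: `lea` produces the sign-extended low 32 bits, the `and` with (32)0 (VE's mask of 32 leading zeros, then ones) clears the sign extension, and `lea.sl` adds the high half shifted left by 32. A C model of the pattern, with `lo`/`hi` standing in for the bufi8@lo/bufi8@hi relocation values (assumption; sketch only):

```c
#include <stdint.h>

/* Combine the 32-bit halves of a 64-bit address the way the lea/and/lea.sl
 * sequence does. Sketch only; lo and hi model relocation results. */
static uint64_t materialize_addr(uint32_t lo, uint32_t hi) {
  uint64_t a = (uint64_t)(int64_t)(int32_t)lo; /* lea: sign-extends lo   */
  a &= 0xffffffffu;                            /* and ..., (32)0: zero-ext */
  a += (uint64_t)hi << 32;                     /* lea.sl: add hi << 32   */
  return a;
}
```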
|
D | load_gv.ll |
    16  ; CHECK-NEXT: lea %s0, vf128@lo
    17  ; CHECK-NEXT: and %s0, %s0, (32)0
    18  ; CHECK-NEXT: lea.sl %s2, vf128@hi(, %s0)
    19  ; CHECK-NEXT: ld %s0, 8(, %s2)
    30  ; CHECK-NEXT: lea %s0, vf64@lo
    31  ; CHECK-NEXT: and %s0, %s0, (32)0
    32  ; CHECK-NEXT: lea.sl %s0, vf64@hi(, %s0)
    33  ; CHECK-NEXT: ld %s0, (, %s0)
    43  ; CHECK-NEXT: lea %s0, vf32@lo
    44  ; CHECK-NEXT: and %s0, %s0, (32)0
    [all …]
|
D | left_shift.ll |
     6  ; CHECK-NEXT: sla.w.sx %s0, %s0, %s1
     7  ; CHECK-NEXT: sll %s0, %s0, 56
     8  ; CHECK-NEXT: sra.l %s0, %s0, 56
    20  ; CHECK-NEXT: sla.w.sx %s0, %s0, %s1
    21  ; CHECK-NEXT: sll %s0, %s0, 48
    22  ; CHECK-NEXT: sra.l %s0, %s0, 48
    34  ; CHECK-NEXT: sla.w.sx %s0, %s0, %s1
    43  ; CHECK-NEXT: sll %s0, %s0, %s1
    52  ; CHECK-NEXT: sla.w.sx %s0, %s0, %s1
    53  ; CHECK-NEXT: and %s0, %s0, (56)0
    [all …]
|
D | cttz.ll |
    12  ; CHECK-NEXT: cmps.l %s2, %s0, (0)1
    13  ; CHECK-NEXT: lea %s3, -1(, %s0)
    14  ; CHECK-NEXT: nnd %s0, %s0, %s3
    15  ; CHECK-NEXT: pcnt %s3, %s0
    16  ; CHECK-NEXT: lea %s0, -1(, %s1)
    17  ; CHECK-NEXT: nnd %s0, %s1, %s0
    18  ; CHECK-NEXT: pcnt %s0, %s0
    19  ; CHECK-NEXT: lea %s0, 64(, %s0)
    20  ; CHECK-NEXT: cmov.l.ne %s0, %s3, %s2
    30  ; CHECK-NEXT: lea %s1, -1(, %s0)
    [all …]
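The `lea -1` / `nnd` / `pcnt` sequence is the classic popcount-based cttz: `~x & (x - 1)` (nnd is VE's not-and) sets exactly the bits below the lowest set bit of x, so its population count equals the trailing-zero count (and yields 64 for x == 0). A C sketch using the GCC/Clang builtin __builtin_popcountll as a stand-in for pcnt:

```c
#include <stdint.h>

/* Trailing-zero count via popcount, mirroring the lea/nnd/pcnt pattern:
 * ~x & (x - 1) isolates the bits strictly below the lowest set bit.
 * Returns 64 for x == 0. Sketch only. */
static int cttz64_via_popcount(uint64_t x) {
  return __builtin_popcountll(~x & (x - 1));
}
```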
|
D | multiply.ll |
     6  ; CHECK-NEXT: muls.w.sx %s0, %s1, %s0
     7  ; CHECK-NEXT: sll %s0, %s0, 56
     8  ; CHECK-NEXT: sra.l %s0, %s0, 56
    17  ; CHECK-NEXT: muls.w.sx %s0, %s1, %s0
    18  ; CHECK-NEXT: sll %s0, %s0, 48
    19  ; CHECK-NEXT: sra.l %s0, %s0, 48
    28  ; CHECK-NEXT: muls.w.sx %s0, %s1, %s0
    29  ; CHECK-NEXT: adds.w.sx %s0, %s0, (0)1
    38  ; CHECK-NEXT: muls.l %s0, %s1, %s0
    48  ; CHECK-NEXT: or %s5, 0, %s0
    [all …]
|
/external/llvm-project/llvm/test/CodeGen/VE/Vector/ |
D | vec_broadcast.ll |
     9  ; CHECK-NEXT: vbrd %v0, %s0
    19  ; CHECK-NEXT: lea %s0, 256
    20  ; CHECK-NEXT: lvl %s0
    33  ; CHECK-NEXT: vbrd %v0, %s0
    43  ; CHECK-NEXT: lea %s0, 256
    44  ; CHECK-NEXT: lvl %s0
    55  ; CHECK-NEXT: and %s0, %s0, (32)0
    58  ; CHECK-NEXT: vbrd %v0, %s0
    68  ; CHECK-NEXT: lea %s0, 256
    69  ; CHECK-NEXT: lvl %s0
    [all …]
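Here `lea %s0, 256` / `lvl %s0` sets the active vector length to 256 elements (VE's maximum), and `vbrd %v0, %s0` broadcasts the scalar into a vector register. A scalar C model of the broadcast (illustrative names only):

```c
#include <stdint.h>

/* Model of lvl + vbrd: fill the first vl elements of a vector register
 * with scalar s. Sketch only. */
static void vbrd_model(int64_t *v, int64_t s, int vl) {
  for (int i = 0; i < vl; ++i) v[i] = s;
}
```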
|
/external/llvm-project/llvm/test/MC/AMDGPU/ |
D | mtbuf-gfx10.s |
    114  tbuffer_store_format_x v0, v1, s[0:3] format:0 s0 idxen
    117  tbuffer_store_format_x v0, v1, s[0:3] format:1 s0 idxen
    123  tbuffer_load_format_d16_x v0, off, s[0:3] s0
    127  tbuffer_store_format_x v0, v1, s[0:3] format:0 s0 idxen
    130  tbuffer_store_format_x v0, v1, s[0:3] format:0 s0 idxen
    140  tbuffer_load_format_d16_x v0, off, s[0:3], format:128, s0
    152  tbuffer_load_format_d16_x v0, off, s[0:3], format:1,, s0
    155  tbuffer_load_format_d16_x v0, off, s[0:3], format:1:, s0
    158  tbuffer_load_format_d16_x v0, off, s[0:3],, format:1, s0
    165  tbuffer_store_format_xyzw v[1:4], v1, s[4:7], s0 format:0 idxen
    [all …]
|
D | mtbuf.s |
     96  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:-1 nfmt:1 s0
     99  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:16 nfmt:1 s0
    102  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:1 nfmt:-1 s0
    105  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:1 nfmt:8 s0
    111  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7],, dfmt:1 nfmt:1 s0
    114  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:1,, nfmt:1 s0
    117  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:1 nfmt:1,, s0
    120  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:1 dfmt:1 s0
    123  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] nfmt:1 nfmt:1 s0
    126  tbuffer_store_format_xyzw v[1:4], off, ttmp[4:7] dfmt:1 nfmt:1 dfmt:1 s0
    [all …]
|
D | gfx10_asm_err.s |
    67  s_and_saveexec_b32 s0, s1
    70  s_or_saveexec_b32 s0, s1
    73  s_xor_saveexec_b32 s0, s1
    76  s_andn2_saveexec_b32 s0, s1
    79  s_orn2_saveexec_b32 s0, s1
    82  s_nand_saveexec_b32 s0, s1
    85  s_nor_saveexec_b32 s0, s1
    88  s_xnor_saveexec_b32 s0, s1
    91  s_andn1_saveexec_b32 s0, s1
    94  s_orn1_saveexec_b32 s0, s1
    [all …]
|
/external/llvm-project/llvm/test/MC/RISCV/ |
D | compress-rv32i.s |
    30  # CHECK-ALIAS: addi s0, sp, 1020
    31  # CHECK-INST: c.addi4spn s0, sp, 1020
    33  addi s0, sp, 1020
    36  # CHECK-ALIAS: lw s0, 124(a5)
    37  # CHECK-INST: c.lw s0, 124(a5)
    39  lw s0, 124(a5)
    42  # CHECK-ALIAS: sw s0, 124(a5)
    43  # CHECK-INST: c.sw s0, 124(a5)
    45  sw s0, 124(a5)
    78  # CHECK-ALIAS: srli s0, s0, 31
    [all …]
|
/external/capstone/suite/MC/Mips/ |
D | nabi-regs.s.cs |
     2  0x02,0x04,0x80,0x20 = add $s0, $s0, $a0
     3  0x02,0x06,0x80,0x20 = add $s0, $s0, $a2
     4  0x02,0x07,0x80,0x20 = add $s0, $s0, $a3
     5  0x02,0x08,0x80,0x20 = add $s0, $s0, $t0
     6  0x02,0x09,0x80,0x20 = add $s0, $s0, $t1
     7  0x02,0x0a,0x80,0x20 = add $s0, $s0, $t2
     8  0x02,0x0b,0x80,0x20 = add $s0, $s0, $t3
     9  0x02,0x0c,0x80,0x20 = add $s0, $s0, $t4
    10  0x02,0x0d,0x80,0x20 = add $s0, $s0, $t5
    11  0x02,0x0e,0x80,0x20 = add $s0, $s0, $t6
    [all …]
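Each Capstone test vector pairs an encoding (read here as a big-endian word) with its expected disassembly. For MIPS R-type ALU instructions the 32-bit word splits into opcode/rs/rt/rd/shamt/funct fields; the sketch below decodes the first vector, 0x02048020, which should yield rs=16 ($s0), rt=4 ($a0), rd=16 ($s0), funct=0x20 (add):

```c
#include <stdint.h>
#include <stdio.h>

/* Field extraction for a MIPS R-type word; sketch only.
 * 0x02048020: opcode=0 (SPECIAL), rs=16, rt=4, rd=16, shamt=0, funct=0x20,
 * matching "add $s0, $s0, $a0" above. */
int main(void) {
  uint32_t insn = 0x02048020u;
  unsigned opcode = insn >> 26;
  unsigned rs = (insn >> 21) & 31;
  unsigned rt = (insn >> 16) & 31;
  unsigned rd = (insn >> 11) & 31;
  unsigned shamt = (insn >> 6) & 31;
  unsigned funct = insn & 63;
  printf("op=%u rs=%u rt=%u rd=%u shamt=%u funct=0x%x\n",
         opcode, rs, rt, rd, shamt, funct);
  return 0;
}
```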
|
/external/libvpx/libvpx/vpx_dsp/x86/ |
D | avg_intrin_sse2.c | (matches in vpx_minmax_8x8_sse2)
    20    __m128i u0, s0, d0, diff, maxabsdiff, minabsdiff, negdiff, absdiff0, absdiff;
    23    s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s)), u0);
    25    diff = _mm_subs_epi16(s0, d0);
    29    s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + p)), u0);
    31    diff = _mm_subs_epi16(s0, d0);
    37    s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 2 * p)), u0);
    39    diff = _mm_subs_epi16(s0, d0);
    45    s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 3 * p)), u0);
    47    diff = _mm_subs_epi16(s0, d0);
    53    s0 = _mm_unpacklo_epi8(_mm_loadl_epi64((const __m128i *)(s + 4 * p)), u0);
    [all …]
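The intrinsics above widen each 8-pixel row of `s` and `d` to 16 bits (unpacking against the zero vector `u0`) and track running min/max of the differences. A scalar sketch of the same 8x8 min/max-of-absolute-difference computation (the signature is modeled on libvpx's vpx_minmax_8x8 and should be treated as an assumption):

```c
#include <stdint.h>
#include <stdlib.h>

/* Scalar model of the SSE2 kernel: min and max of |s - d| over an 8x8
 * block, with row strides p and dp. Sketch under an assumed signature. */
static void minmax_8x8_ref(const uint8_t *s, int p, const uint8_t *d, int dp,
                           int *min, int *max) {
  *min = 255;
  *max = 0;
  for (int r = 0; r < 8; ++r) {
    for (int c = 0; c < 8; ++c) {
      const int diff = abs(s[r * p + c] - d[r * dp + c]);
      if (diff < *min) *min = diff;
      if (diff > *max) *max = diff;
    }
  }
}
```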
|