/external/valgrind/main/VEX/useful/ |
D | fp_80_64.c |
    44   static void convert_f80le_to_f64le_HW ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )   in convert_f80le_to_f64le_HW() argument
    48       : "r" (&f80[0]), "r" (&f64[0])   in convert_f80le_to_f64le_HW()
    52   static void convert_f64le_to_f80le_HW ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )   in convert_f64le_to_f80le_HW() argument
    56       : "r" (&f64[0]), "r" (&f80[0])   in convert_f64le_to_f80le_HW()
    103  static void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )   in convert_f64le_to_f80le() argument
    109  sign = toUChar( (f64[7] >> 7) & 1 );   in convert_f64le_to_f80le()
    110  bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);   in convert_f64le_to_f80le()
    120  (f64[6] & 0x0F) == 0   in convert_f64le_to_f80le()
    121  && f64[5] == 0 && f64[4] == 0 && f64[3] == 0   in convert_f64le_to_f80le()
    122  && f64[2] == 0 && f64[1] == 0 && f64[0] == 0   in convert_f64le_to_f80le()
    [all …]
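The matched lines show only the operand constraints of the hardware-conversion helpers; the asm bodies are elided. As a hedged reconstruction (x86 only; the instruction template is my assumption, not the file's exact text), the 80-to-64-bit direction presumably just round-trips through the x87 register stack, letting the FPU perform the narrowing and rounding:

```c
typedef unsigned char UChar;

/* Sketch: fldt pushes the 10-byte extended value onto the x87 stack and
   fstpl pops it back out as an 8-byte double, so the hardware does the
   conversion. Only the constraint lines appear in the match above. */
static void convert_f80le_to_f64le_HW ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )
{
   __asm__ volatile ("fldt (%0)\n\tfstpl (%1)"
                     : /* no outputs */
                     : "r" (&f80[0]), "r" (&f64[0])
                     : "memory");
}
```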
|
/external/llvm/test/MC/ARM/ |
D | single-precision-fp.s |
    5    vadd.f64 d0, d1, d2
    6    vsub.f64 d2, d3, d4
    7    vdiv.f64 d4, d5, d6
    8    vmul.f64 d6, d7, d8
    9    vnmul.f64 d8, d9, d10
    11   @ CHECK-ERRORS-NEXT: vadd.f64 d0, d1, d2
    13   @ CHECK-ERRORS-NEXT: vsub.f64 d2, d3, d4
    15   @ CHECK-ERRORS-NEXT: vdiv.f64 d4, d5, d6
    17   @ CHECK-ERRORS-NEXT: vmul.f64 d6, d7, d8
    19   @ CHECK-ERRORS-NEXT: vnmul.f64 d8, d9, d10
    [all …]
|
D | thumb-fp-armv8.s |
    5    vcvtt.f64.f16 d3, s1
    6    @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
    7    vcvtt.f16.f64 s5, d12
    8    @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b]
    10   vcvtb.f64.f16 d3, s1
    11   @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b]
    12   vcvtb.f16.f64 s4, d1
    13   @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b]
    16   vcvttge.f64.f16 d3, s1
    17   @ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
    [all …]
|
D | fp-armv8.s |
    5    vcvtt.f64.f16 d3, s1
    6    @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
    7    vcvtt.f16.f64 s5, d12
    8    @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
    10   vcvtb.f64.f16 d3, s1
    11   @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0xee]
    12   vcvtb.f16.f64 s4, d1
    13   @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xee]
    15   vcvttge.f64.f16 d3, s1
    16   @ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xae]
    [all …]
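Comparing this entry with thumb-fp-armv8.s above: the instructions and check lines are identical, and only the byte order of each encoding differs, because ARM mode is stored as one little-endian 32-bit word while Thumb-2 is stored as two little-endian halfwords, high halfword first. A small C sketch of the relationship (helper name mine):

```c
#include <stdint.h>

/* The ARM byte stream [0xe0,0x3b,0xb2,0xee] and the Thumb-2 byte stream
   [0xb2,0xee,0xe0,0x3b] both encode the 32-bit value 0xeeb23be0 for
   vcvtt.f64.f16 d3, s1. Reading either stream as a little-endian word
   and rotating by 16 bits yields the other's word view. */
static inline uint32_t swap_halfwords(uint32_t w)
{
    return (w >> 16) | (w << 16);
}
```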
|
D | simple-fp-encoding.s |
    3    vadd.f64 d16, d17, d16
    5    @ CHECK: vadd.f64 d16, d17, d16 @ encoding: [0xa0,0x0b,0x71,0xee]
    8    vsub.f64 d16, d17, d16
    10   @ CHECK: vsub.f64 d16, d17, d16 @ encoding: [0xe0,0x0b,0x71,0xee]
    13   vdiv.f64 d16, d17, d16
    16   vdiv.f64 d5, d7
    18   @ CHECK: vdiv.f64 d16, d17, d16 @ encoding: [0xa0,0x0b,0xc1,0xee]
    21   @ CHECK: vdiv.f64 d5, d5, d7 @ encoding: [0x07,0x5b,0x85,0xee]
    24   vmul.f64 d16, d17, d16
    25   vmul.f64 d20, d17
    [all …]
|
D | directive-arch_extension-fp.s |
    35   vselgt.f64 d0, d0, d0
    37   vselge.f64 d0, d0, d0
    39   vseleq.f64 d0, d0, d0
    41   vselvs.f64 d0, d0, d0
    43   vmaxnm.f64 d0, d0, d0
    45   vminnm.f64 d0, d0, d0
    48   vcvtb.f64.f16 d0, s0
    50   vcvtb.f16.f64 s0, d0
    52   vcvtt.f64.f16 d0, s0
    54   vcvtt.f16.f64 s0, d0
    [all …]
|
D | directive-arch_extension-simd.s |
    24   vmaxnm.f64 d0, d0, d0
    26   vminnm.f64 d0, d0, d0
    33   vcvta.s32.f64 s0, d0
    35   vcvta.u32.f64 s0, d0
    41   vcvtn.s32.f64 s0, d0
    43   vcvtn.u32.f64 s0, d0
    49   vcvtp.s32.f64 s0, d0
    51   vcvtp.u32.f64 s0, d0
    57   vcvtm.s32.f64 s0, d0
    59   vcvtm.u32.f64 s0, d0
    [all …]
|
D | invalid-fp-armv8.s |
    5    vcvtt.f64.f16 d3, s1
    6    @ V7-NOT: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
    7    vcvtt.f16.f64 s5, d12
    8    @ V7-NOT: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
    39   vselgt.f64 s3, s2, s1
    43   vselgt.f64 q0, s3, q1
    48   vminnm.f64 s3, s2, s1
    52   vmaxnm.f64 q0, s3, q1
    54   vmaxnmgt.f64 q0, s3, q1
    57   vcvta.s32.f64 d3, s2
    [all …]
|
D | vfp4.s |
    7    @ ARM: vfma.f64 d16, d18, d17 @ encoding: [0xa1,0x0b,0xe2,0xee]
    8    @ THUMB: vfma.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xa1,0x0b]
    10   @ THUMB_V7EM-ERRORS-NEXT: vfma.f64 d16, d18, d17
    11   vfma.f64 d16, d18, d17
    30   @ ARM: vfnma.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xd2,0xee]
    31   @ THUMB: vfnma.f64 d16, d18, d17 @ encoding: [0xd2,0xee,0xe1,0x0b]
    33   @ THUMB_V7EM-ERRORS-NEXT: vfnma.f64 d16, d18, d17
    34   vfnma.f64 d16, d18, d17
    41   @ ARM: vfms.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xe2,0xee]
    42   @ THUMB: vfms.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xe1,0x0b]
    [all …]
|
/external/valgrind/main/VEX/priv/ |
D | guest_generic_x87.c |
    105  void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )   in convert_f64le_to_f80le() argument
    111  sign = toUChar( (f64[7] >> 7) & 1 );   in convert_f64le_to_f80le()
    112  bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);   in convert_f64le_to_f80le()
    122  (f64[6] & 0x0F) == 0   in convert_f64le_to_f80le()
    123  && f64[5] == 0 && f64[4] == 0 && f64[3] == 0   in convert_f64le_to_f80le()
    124  && f64[2] == 0 && f64[1] == 0 && f64[0] == 0   in convert_f64le_to_f80le()
    147  if (read_bit_array(f64, i))   in convert_f64le_to_f80le()
    156  read_bit_array( f64, i ) );   in convert_f64le_to_f80le()
    194  if (f64[6] & 8) {   in convert_f64le_to_f80le()
    223  f80[7] = toUChar( (1 << 7) | ((f64[6] << 3) & 0x78)   in convert_f64le_to_f80le()
    [all …]
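The two extraction lines (111 and 112) pull the sign bit and the biased exponent out of a little-endian IEEE-754 double stored as raw bytes. A self-contained sketch of the same field layout (1 sign bit, 11 exponent bits, 52 fraction bits), with variable names of my choosing:

```c
#include <stdint.h>
#include <string.h>

/* Decompose a little-endian IEEE-754 double into its three fields; the
   word-level equivalent of the byte-level sign/bexp lines matched above
   (the source masks bexp to 11 bits on a later, unmatched line). */
static void f64_fields(const unsigned char f64[8],
                       int* sign, int* bexp, uint64_t* frac)
{
    uint64_t bits;
    memcpy(&bits, f64, 8);                  /* bytes are little-endian  */
    *sign = (int)(bits >> 63);              /* (f64[7] >> 7) & 1        */
    *bexp = (int)((bits >> 52) & 0x7FF);    /* (f64[7]<<4)|(f64[6]>>4)  */
    *frac = bits & 0xFFFFFFFFFFFFFULL;      /* the all-zero-frac test   */
}
```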
|
/external/llvm/test/MC/Disassembler/ARM/ |
D | thumb-fp-armv8.txt |
    4    # CHECK: vcvtt.f64.f16 d3, s1
    7    # CHECK: vcvtt.f16.f64 s5, d12
    10   # CHECK: vcvtb.f64.f16 d3, s1
    13   # CHECK: vcvtb.f16.f64 s4, d1
    17   # CHECK: vcvttge.f64.f16 d3, s1
    21   # CHECK: vcvttgt.f16.f64 s5, d12
    25   # CHECK: vcvtbeq.f64.f16 d3, s1
    29   # CHECK: vcvtblt.f16.f64 s4, d1
    36   # CHECK: vcvta.s32.f64 s2, d3
    42   # CHECK: vcvtn.s32.f64 s6, d23
    [all …]
|
D | fp-armv8.txt |
    4    # CHECK: vcvtt.f64.f16 d3, s1
    7    # CHECK: vcvtt.f16.f64 s5, d12
    10   # CHECK: vcvtb.f64.f16 d3, s1
    13   # CHECK: vcvtb.f16.f64 s4, d1
    16   # CHECK: vcvttge.f64.f16 d3, s1
    19   # CHECK: vcvttgt.f16.f64 s5, d12
    22   # CHECK: vcvtbeq.f64.f16 d3, s1
    25   # CHECK: vcvtblt.f16.f64 s4, d1
    32   # CHECK: vcvta.s32.f64 s2, d3
    38   # CHECK: vcvtn.s32.f64 s6, d23
    [all …]
|
/external/llvm/test/CodeGen/XCore/ |
D | float-intrinsics.ll |
    2    declare double @llvm.cos.f64(double)
    3    declare double @llvm.exp.f64(double)
    4    declare double @llvm.exp2.f64(double)
    5    declare double @llvm.log.f64(double)
    6    declare double @llvm.log10.f64(double)
    7    declare double @llvm.log2.f64(double)
    8    declare double @llvm.pow.f64(double, double)
    9    declare double @llvm.powi.f64(double, i32)
    10   declare double @llvm.sin.f64(double)
    11   declare double @llvm.sqrt.f64(double)
    [all …]
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
    26   // Handle all vector types as either f64 or v2f64.
    27   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    30   // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
    31   CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
    37   CCIfType<[f64], CCAssignToStack<8, 4>>,
    45   // Handle all vector types as either f64 or v2f64.
    46   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    49   CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
    59   // Handle all vector types as either f64 or v2f64.
    60   CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    [all …]
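The comment on line 30 explains the custom handler: under APCS an f64 is passed in two adjacent GPRs, possibly split across registers and stack. What CC_ARM_APCS_Custom_f64 models can be sketched in C as splitting the double's bit pattern into two 32-bit pieces (helper name mine, for illustration only):

```c
#include <stdint.h>
#include <string.h>

/* Split an f64 argument into two GPR-sized words, low word first on a
   little-endian target; whether the pieces land in r0..r3 or spill to
   the stack is what the custom CC routine decides. */
static void split_f64_for_gprs(double d, uint32_t* lo, uint32_t* hi)
{
    uint64_t bits;
    memcpy(&bits, &d, sizeof bits);       /* type-pun without UB */
    *lo = (uint32_t)(bits & 0xFFFFFFFFu);
    *hi = (uint32_t)(bits >> 32);
}
```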
|
/external/valgrind/main/none/tests/amd64/ |
D | nan80and64.c |
    61   static void rev64 ( UChar* f64 )   in rev64() argument
    63   SWAPC( f64[0], f64[7] );   in rev64()
    64   SWAPC( f64[1], f64[6] );   in rev64()
    65   SWAPC( f64[2], f64[5] );   in rev64()
    66   SWAPC( f64[3], f64[4] );   in rev64()
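SWAPC falls outside the matched lines; a definition along the following lines (my guess, not necessarily the file's) makes rev64() self-contained. The four swaps byte-reverse the 8-byte f64 in place, converting between its little- and big-endian views:

```c
typedef unsigned char UChar;

/* Plausible definition of the elided SWAPC helper: exchange two UChar
   lvalues through a temporary. */
#define SWAPC(a, b) do { UChar _t = (a); (a) = (b); (b) = _t; } while (0)
```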
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonInstrInfoV5.td |
    71   [(set DoubleRegs:$dst, (f64 (load ADDRriS11_3:$addr)))]>,
    79   [(set DoubleRegs:$dst, (f64 (load (add IntRegs:$src1,
    115  [(store (f64 DoubleRegs:$src1), ADDRriS11_2:$addr)]>,
    123  [(store (f64 DoubleRegs:$src3),
    178  (OpNode (f64 DoubleRegs:$b), (f64 DoubleRegs:$c)))]>,
    214  def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (f64 DoubleRegs:$src2))),
    218  def : Pat <(i1 (setolt (f64 DoubleRegs:$src1), (fpimm:$src2))),
    219  (i1 (FCMPOGT64_rr (f64 (CONST64_Float_Real fpimm:$src2)),
    220  (f64 DoubleRegs:$src1)))>,
    224  def : Pat <(i1 (setugt (f64 DoubleRegs:$src1), (fpimm:$src2))),
    [all …]
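Note the trick in the setolt patterns: Hexagon lowers ordered less-than through the greater-than compare FCMPOGT64 by swapping operands (for the fpimm form, the constant is first materialized with CONST64_Float_Real). The rewrite is sound because an ordered compare is false whenever either input is NaN. In C terms (helper names mine):

```c
/* Ordered FP compares return false when either operand is NaN, so
   a <o b can be lowered as b >o a with identical NaN behavior, which is
   exactly the operand swap the patterns above perform. */
static int fcmp_ogt(double x, double y) { return x > y; }
static int fcmp_olt(double a, double b) { return fcmp_ogt(b, a); }
```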
|
/external/llvm/test/CodeGen/SystemZ/ |
D | fp-round-01.ll |
    15   ; Test rint for f64.
    16   declare double @llvm.rint.f64(double %f)
    21   %res = call double @llvm.rint.f64(double %f)
    47   ; Test nearbyint for f64.
    48   declare double @llvm.nearbyint.f64(double %f)
    53   %res = call double @llvm.nearbyint.f64(double %f)
    70   ; Test floor for f64.
    71   declare double @llvm.floor.f64(double %f)
    76   %res = call double @llvm.floor.f64(double %f)
    93   ; Test ceil for f64.
    [all …]
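The intrinsics tested here have standard C library counterparts: llvm.rint.f64 matches rint() (round per the current mode, may raise the inexact flag), llvm.nearbyint.f64 matches nearbyint() (same result, never raises inexact), and llvm.floor.f64/llvm.ceil.f64 match floor()/ceil(). A small worked example under the default round-to-nearest-even mode (this is the usual mapping, not something the test file states):

```c
#include <math.h>
#include <stdio.h>

int main(void)
{
    printf("%g\n", rint(2.5));       /* 2: nearest, ties to even    */
    printf("%g\n", nearbyint(2.5));  /* 2: same, inexact not raised */
    printf("%g\n", floor(-1.5));     /* -2: toward minus infinity   */
    printf("%g\n", ceil(-1.5));      /* -1: toward plus infinity    */
    return 0;
}
```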
|
D | fp-sqrt-02.ll |
    5    declare double @llvm.sqrt.f64(double %f)
    13   %res = call double @llvm.sqrt.f64(double %val)
    23   %res = call double @llvm.sqrt.f64(double %val)
    34   %res = call double @llvm.sqrt.f64(double %val)
    47   %res = call double @llvm.sqrt.f64(double %val)
    59   %res = call double @llvm.sqrt.f64(double %val)
    72   %res = call double @llvm.sqrt.f64(double %val)
    100  %sqrt0 = call double @llvm.sqrt.f64(double %val0)
    101  %sqrt1 = call double @llvm.sqrt.f64(double %val1)
    102  %sqrt2 = call double @llvm.sqrt.f64(double %val2)
    [all …]
|
/external/llvm/test/Transforms/BBVectorize/X86/ |
D | simple-int.ll |
    1    target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
    4    declare double @llvm.fma.f64(double, double, double)
    5    declare double @llvm.fmuladd.f64(double, double, double)
    6    declare double @llvm.cos.f64(double)
    7    declare double @llvm.powi.f64(double, i32)
    13   %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1)
    14   %Y2 = call double @llvm.fma.f64(double %X2, double %A2, double %C2)
    27   %Y1 = call double @llvm.fmuladd.f64(double %X1, double %A1, double %C1)
    28   %Y2 = call double @llvm.fmuladd.f64(double %X2, double %A2, double %C2)
    41   %Y1 = call double @llvm.cos.f64(double %X1)
    [all …]
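The file deliberately exercises llvm.fma.f64 and llvm.fmuladd.f64 separately: fma demands a single-rounding fused multiply-add, exactly like C99 fma(), while fmuladd merely permits fusion, so a backend may emit either a fused op or a separate multiply and add. Behavioral C analogues (function names mine):

```c
#include <math.h>

/* llvm.fma.f64: one rounding, always fused. */
double as_fma(double x, double a, double c)     { return fma(x, a, c); }

/* llvm.fmuladd.f64: fusion optional; this is the unfused fallback
   (a compiler with FP_CONTRACT on may fuse it anyway). */
double as_fmuladd(double x, double a, double c) { return x * a + c; }
```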
|
/external/llvm/test/CodeGen/ARM/ |
D | fnmscs.ll |
    64   ; VFP2: vnmla.f64
    67   ; NEON: vnmla.f64
    70   ; A8U: vnmul.f64 d
    71   ; A8U: vsub.f64 d
    74   ; A8: vnmul.f64 d
    75   ; A8: vsub.f64 d
    85   ; VFP2: vnmla.f64
    88   ; NEON: vnmla.f64
    91   ; A8U: vnmul.f64 d
    92   ; A8U: vsub.f64 d
    [all …]
|
D | 2011-11-09-IllegalVectorFPIntConvert.ll |
    5    ; CHECK: vcvt.s32.f64
    6    ; CHECK: vcvt.s32.f64
    14   ; CHECK: vcvt.u32.f64
    15   ; CHECK: vcvt.u32.f64
    23   ; CHECK: vcvt.f64.s32
    24   ; CHECK: vcvt.f64.s32
    32   ; CHECK: vcvt.f64.u32
    33   ; CHECK: vcvt.f64.u32
|
D | neon_fpconv.ll |
    5    ; CHECK: vcvt.f32.f64 [[S0:s[0-9]+]], [[D0:d[0-9]+]]
    6    ; CHECK: vcvt.f32.f64 [[S1:s[0-9]+]], [[D1:d[0-9]+]]
    12   ; CHECK: vcvt.f64.f32 [[D0:d[0-9]+]], [[S0:s[0-9]+]]
    13   ; CHECK: vcvt.f64.f32 [[D1:d[0-9]+]], [[S1:s[0-9]+]]
    24   ; CHECK-NEXT: vcvt.f64.s32
    25   ; CHECK-NEXT: vcvt.f64.s32
    36   ; CHECK-NEXT: vcvt.f64.u32
    37   ; CHECK-NEXT: vcvt.f64.u32
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrVSX.td |
    53   [(set f64:$XT, (load xoaddr:$src))]>;
    74   [(store f64:$XT, xoaddr:$dst)]>;
    91   [(set f64:$XT, (fadd f64:$XA, f64:$XB))]>;
    95   [(set f64:$XT, (fmul f64:$XA, f64:$XB))]>;
    122  [(set f64:$XT, (fsub f64:$XA, f64:$XB))]>;
    139  [(set f64:$XT, (fma f64:$XA, f64:$XB, f64:$XTi))]>,
    155  [(set f64:$XT, (fma f64:$XA, f64:$XB, (fneg f64:$XTi)))]>,
    171  [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, f64:$XTi)))]>,
    187  [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, (fneg f64:$XTi))))]>,
    330  [(set f64:$XT, (fdiv f64:$XA, f64:$XB))]>;
    [all …]
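Lines 139 through 187 are the four scalar multiply-add shapes, distinguished only by where fneg sits in the fma operand tree. Expressed with C99 fma() (the VSX mnemonics in the comments are my assumed mapping; the matched lines do not show the instruction names):

```c
#include <math.h>

double madd (double a, double b, double c) { return  fma(a, b,  c); } /* xsmaddadp  */
double msub (double a, double b, double c) { return  fma(a, b, -c); } /* xsmsubadp  */
double nmadd(double a, double b, double c) { return -fma(a, b,  c); } /* xsnmaddadp */
double nmsub(double a, double b, double c) { return -fma(a, b, -c); } /* xsnmsubadp */
```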
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-cvt.ll |
    26   %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
    34   %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
    40   declare i32 @llvm.aarch64.neon.fcvtas.i32.f64(double) nounwind readnone
    41   declare i64 @llvm.aarch64.neon.fcvtas.i64.f64(double) nounwind readnone
    66   %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %A)
    74   %tmp3 = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %A)
    80   declare i32 @llvm.aarch64.neon.fcvtau.i32.f64(double) nounwind readnone
    81   declare i64 @llvm.aarch64.neon.fcvtau.i64.f64(double) nounwind readnone
    106  %tmp3 = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %A)
    114  %tmp3 = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %A)
    [all …]
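fcvtas, fcvtau, and fcvtms differ only in rounding and signedness: round to nearest with ties away from zero for the first two, toward minus infinity for the third. Rough scalar C analogues (an assumed mapping; the real instructions also saturate on overflow, which a plain C cast does not):

```c
#include <math.h>
#include <stdint.h>

int64_t  cvt_fcvtas(double x) { return (int64_t)llround(x);  } /* nearest, ties away    */
uint64_t cvt_fcvtau(double x) { return (uint64_t)llround(x); } /* unsigned flavor       */
int64_t  cvt_fcvtms(double x) { return (int64_t)floor(x);    } /* toward minus infinity */
```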
|
/external/valgrind/main/none/tests/s390x/ |
D | pfpo.stdout.exp |
    12   f64 -> d32: round=0 123456789999.565674 -> 26a4d2e8 ret=0 cc=1
    13   f64 -> d32: round=0 0.000000 -> 0 ret=0 cc=1
    14   f64 -> d32: round=0 1797693134862315708145274237317043567980705675258449965989174768031572607800285…
    15   f64 -> d64: round=0 123456789999.565674 -> 262934b9c7fa7f57 ret=0 cc=1
    16   f64 -> d64: round=0 0.000000 -> 92d251ce3ea1d01 ret=0 cc=1
    17   f64 -> d64: round=0 1797693134862315708145274237317043567980705675258449965989174768031572607800285…
    18   f64 -> d128: round=0 123456789999.565674 -> 22050000000028e56f3cffb97734b8a5 ret=0 cc=0
    19   f64 -> d128: round=0 0.000000 -> 29b2d251ce3ea1d016ac1a4cb976ca04 ret=0 cc=1
    20   f64 -> d128: round=0 179769313486231570814527423731704356798070567525844996598917476803157260780028…
    66   f64 -> d32: round=1 123456789999.565674 -> 26a4d2e8 ret=0 cc=1
    [all …]
|