/external/valgrind/VEX/useful/ |
D | fp_80_64.c |
     44  static void convert_f80le_to_f64le_HW ( /*IN*/UChar* f80, /*OUT*/UChar* f64 )
     48     : "r" (&f80[0]), "r" (&f64[0])
     52  static void convert_f64le_to_f80le_HW ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )
     56     : "r" (&f64[0]), "r" (&f80[0])
    103  static void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )
    109     sign = toUChar( (f64[7] >> 7) & 1 );
    110     bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
    120     (f64[6] & 0x0F) == 0
    121     && f64[5] == 0 && f64[4] == 0 && f64[3] == 0
    122     && f64[2] == 0 && f64[1] == 0 && f64[0] == 0
    [all …]
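The excerpt pulls the sign and biased exponent straight out of the little-endian byte image of a double (lines 109-110). A minimal standalone sketch of that byte-level view, assuming a little-endian host (variable names are illustrative, not taken from the file):

    #include <stdio.h>
    #include <string.h>

    typedef unsigned char UChar;

    int main(void)
    {
       /* Byte 7 holds the sign bit and the top 7 exponent bits; the high
          nibble of byte 6 holds the remaining 4 exponent bits, matching
          lines 109-110 of the excerpt. */
       double d = -1.5;
       UChar f64[8];
       memcpy(f64, &d, sizeof d);

       int sign = (f64[7] >> 7) & 1;
       int bexp = ((f64[7] << 4) | ((f64[6] >> 4) & 0x0F)) & 0x7FF;

       /* -1.5 -> sign=1, biased exponent 1023 (unbiased 0) */
       printf("sign=%d bexp=%d unbiased=%d\n", sign, bexp, bexp - 1023);
       return 0;
    }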
|
/external/llvm/test/MC/ARM/ |
D | single-precision-fp.s |
      5  vadd.f64 d0, d1, d2
      6  vsub.f64 d2, d3, d4
      7  vdiv.f64 d4, d5, d6
      8  vmul.f64 d6, d7, d8
      9  vnmul.f64 d8, d9, d10
     11  @ CHECK-ERRORS-NEXT: vadd.f64 d0, d1, d2
     13  @ CHECK-ERRORS-NEXT: vsub.f64 d2, d3, d4
     15  @ CHECK-ERRORS-NEXT: vdiv.f64 d4, d5, d6
     17  @ CHECK-ERRORS-NEXT: vmul.f64 d6, d7, d8
     19  @ CHECK-ERRORS-NEXT: vnmul.f64 d8, d9, d10
    [all …]
|
D | fp-armv8.s |
      5  vcvtt.f64.f16 d3, s1
      6  @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
      7  vcvtt.f16.f64 s5, d12
      8  @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
     10  vcvtb.f64.f16 d3, s1
     11  @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0x60,0x3b,0xb2,0xee]
     12  vcvtb.f16.f64 s4, d1
     13  @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0x41,0x2b,0xb3,0xee]
     15  vcvttge.f64.f16 d3, s1
     16  @ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xae]
    [all …]
|
D | thumb-fp-armv8.s |
      5  vcvtt.f64.f16 d3, s1
      6  @ CHECK: vcvtt.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
      7  vcvtt.f16.f64 s5, d12
      8  @ CHECK: vcvtt.f16.f64 s5, d12 @ encoding: [0xf3,0xee,0xcc,0x2b]
     10  vcvtb.f64.f16 d3, s1
     11  @ CHECK: vcvtb.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0x60,0x3b]
     12  vcvtb.f16.f64 s4, d1
     13  @ CHECK: vcvtb.f16.f64 s4, d1 @ encoding: [0xb3,0xee,0x41,0x2b]
     16  vcvttge.f64.f16 d3, s1
     17  @ CHECK: vcvttge.f64.f16 d3, s1 @ encoding: [0xb2,0xee,0xe0,0x3b]
    [all …]
|
D | directive-arch_extension-fp.s |
     35  vselgt.f64 d0, d0, d0
     37  vselge.f64 d0, d0, d0
     39  vseleq.f64 d0, d0, d0
     41  vselvs.f64 d0, d0, d0
     43  vmaxnm.f64 d0, d0, d0
     45  vminnm.f64 d0, d0, d0
     48  vcvtb.f64.f16 d0, s0
     50  vcvtb.f16.f64 s0, d0
     52  vcvtt.f64.f16 d0, s0
     54  vcvtt.f16.f64 s0, d0
    [all …]
|
D | directive-arch_extension-simd.s |
     24  vmaxnm.f64 d0, d0, d0
     26  vminnm.f64 d0, d0, d0
     33  vcvta.s32.f64 s0, d0
     35  vcvta.u32.f64 s0, d0
     41  vcvtn.s32.f64 s0, d0
     43  vcvtn.u32.f64 s0, d0
     49  vcvtp.s32.f64 s0, d0
     51  vcvtp.u32.f64 s0, d0
     57  vcvtm.s32.f64 s0, d0
     59  vcvtm.u32.f64 s0, d0
    [all …]
|
D | simple-fp-encoding.s |
      3  vadd.f64 d16, d17, d16
      5  @ CHECK: vadd.f64 d16, d17, d16 @ encoding: [0xa0,0x0b,0x71,0xee]
      8  vsub.f64 d16, d17, d16
     10  @ CHECK: vsub.f64 d16, d17, d16 @ encoding: [0xe0,0x0b,0x71,0xee]
     13  vdiv.f64 d16, d17, d16
     16  vdiv.f64 d5, d7
     18  @ CHECK: vdiv.f64 d16, d17, d16 @ encoding: [0xa0,0x0b,0xc1,0xee]
     21  @ CHECK: vdiv.f64 d5, d5, d7 @ encoding: [0x07,0x5b,0x85,0xee]
     24  vmul.f64 d16, d17, d16
     25  vmul.f64 d20, d17
    [all …]
|
D | d16.s |
      7  @ D16-NEXT: vadd.f64 d1, d2, d16
      8  vadd.f64 d1, d2, d16
     11  @ D16-NEXT: vadd.f64 d1, d17, d6
     12  vadd.f64 d1, d17, d6
     15  @ D16-NEXT: vadd.f64 d19, d7, d6
     16  vadd.f64 d19, d7, d6
     19  @ D16-NEXT: vcvt.f64.f32 d22, s4
     20  vcvt.f64.f32 d22, s4
     23  @ D16-NEXT: vcvt.f32.f64 s26, d30
     24  vcvt.f32.f64 s26, d30
|
D | invalid-fp-armv8.s |
      5  vcvtt.f64.f16 d3, s1
      6  @ V7-NOT: vcvtt.f64.f16 d3, s1 @ encoding: [0xe0,0x3b,0xb2,0xee]
      7  vcvtt.f16.f64 s5, d12
      8  @ V7-NOT: vcvtt.f16.f64 s5, d12 @ encoding: [0xcc,0x2b,0xf3,0xee]
     39  vselgt.f64 s3, s2, s1
     43  vselgt.f64 q0, s3, q1
     48  vminnm.f64 s3, s2, s1
     52  vmaxnm.f64 q0, s3, q1
     54  vmaxnmgt.f64 q0, s3, q1
     57  vcvta.s32.f64 d3, s2
    [all …]
|
D | vfp4.s |
      7  @ ARM: vfma.f64 d16, d18, d17 @ encoding: [0xa1,0x0b,0xe2,0xee]
      8  @ THUMB: vfma.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xa1,0x0b]
     10  @ THUMB_V7EM-ERRORS-NEXT: vfma.f64 d16, d18, d17
     11  vfma.f64 d16, d18, d17
     30  @ ARM: vfnma.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xd2,0xee]
     31  @ THUMB: vfnma.f64 d16, d18, d17 @ encoding: [0xd2,0xee,0xe1,0x0b]
     33  @ THUMB_V7EM-ERRORS-NEXT: vfnma.f64 d16, d18, d17
     34  vfnma.f64 d16, d18, d17
     41  @ ARM: vfms.f64 d16, d18, d17 @ encoding: [0xe1,0x0b,0xe2,0xee]
     42  @ THUMB: vfms.f64 d16, d18, d17 @ encoding: [0xe2,0xee,0xe1,0x0b]
    [all …]
|
/external/valgrind/VEX/priv/ |
D | guest_generic_x87.c |
    105  void convert_f64le_to_f80le ( /*IN*/UChar* f64, /*OUT*/UChar* f80 )
    111     sign = toUChar( (f64[7] >> 7) & 1 );
    112     bexp = (f64[7] << 4) | ((f64[6] >> 4) & 0x0F);
    122     (f64[6] & 0x0F) == 0
    123     && f64[5] == 0 && f64[4] == 0 && f64[3] == 0
    124     && f64[2] == 0 && f64[1] == 0 && f64[0] == 0
    147     if (read_bit_array(f64, i))
    156        read_bit_array( f64, i ) );
    194     if (f64[6] & 8) {
    223     f80[7] = toUChar( (1 << 7) | ((f64[6] << 3) & 0x78)
    [all …]
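Lines 147 and 156 walk the f64 mantissa one bit at a time through read_bit_array. The helper itself is not among the hits; a plausible definition (a sketch, not necessarily VEX's exact code) treats the 8-byte image as a little-endian bit array:

    typedef unsigned char UChar;
    typedef unsigned int  UInt;

    /* Return bit n of the byte array, counting bit 0 as the least
       significant bit of arr[0]. */
    static UInt read_bit_array ( UChar* arr, UInt n )
    {
       return (arr[n >> 3] >> (n & 7)) & 1;
    }

Under that numbering, bits 0-51 of the f64 image are the mantissa, bits 52-62 the biased exponent, and bit 63 the sign, which is the layout the conversion loop iterates over.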
|
/external/llvm/test/CodeGen/Thumb2/ |
D | float-intrinsics-double.ll |
      8  declare double @llvm.sqrt.f64(double %Val)
     12  ; HARD: vsqrt.f64 d0, d0
     13  %1 = call double @llvm.sqrt.f64(double %a)
     17  declare double @llvm.powi.f64(double %Val, i32 %power)
     22  %1 = call double @llvm.powi.f64(double %a, i32 %b)
     26  declare double @llvm.sin.f64(double %Val)
     31  %1 = call double @llvm.sin.f64(double %a)
     35  declare double @llvm.cos.f64(double %Val)
     40  %1 = call double @llvm.cos.f64(double %a)
     44  declare double @llvm.pow.f64(double %Val, double %power)
    [all …]
|
/external/llvm/test/MC/Disassembler/ARM/ |
D | thumb-fp-armv8.txt |
      4  # CHECK: vcvtt.f64.f16 d3, s1
      7  # CHECK: vcvtt.f16.f64 s5, d12
     10  # CHECK: vcvtb.f64.f16 d3, s1
     13  # CHECK: vcvtb.f16.f64 s4, d1
     17  # CHECK: vcvttge.f64.f16 d3, s1
     21  # CHECK: vcvttgt.f16.f64 s5, d12
     25  # CHECK: vcvtbeq.f64.f16 d3, s1
     29  # CHECK: vcvtblt.f16.f64 s4, d1
     36  # CHECK: vcvta.s32.f64 s2, d3
     42  # CHECK: vcvtn.s32.f64 s6, d23
    [all …]
|
D | fp-armv8.txt |
      4  # CHECK: vcvtt.f64.f16 d3, s1
      7  # CHECK: vcvtt.f16.f64 s5, d12
     10  # CHECK: vcvtb.f64.f16 d3, s1
     13  # CHECK: vcvtb.f16.f64 s4, d1
     16  # CHECK: vcvttge.f64.f16 d3, s1
     19  # CHECK: vcvttgt.f16.f64 s5, d12
     22  # CHECK: vcvtbeq.f64.f16 d3, s1
     25  # CHECK: vcvtblt.f16.f64 s4, d1
     32  # CHECK: vcvta.s32.f64 s2, d3
     38  # CHECK: vcvtn.s32.f64 s6, d23
    [all …]
|
/external/llvm/test/ExecutionEngine/Interpreter/ |
D | intrinsics.ll |
      5  declare double @llvm.sin.f64(double)
      7  declare double @llvm.cos.f64(double)
      9  declare double @llvm.floor.f64(double)
     11  declare double @llvm.ceil.f64(double)
     13  declare double @llvm.trunc.f64(double)
     15  declare double @llvm.round.f64(double)
     17  declare double @llvm.copysign.f64(double, double)
     21  %sin64 = call double @llvm.sin.f64(double 0.000000e+00)
     23  %cos64 = call double @llvm.cos.f64(double 0.000000e+00)
     25  %floor64 = call double @llvm.floor.f64(double 0.000000e+00)
    [all …]
|
/external/llvm/test/CodeGen/XCore/ |
D | float-intrinsics.ll |
      2  declare double @llvm.cos.f64(double)
      3  declare double @llvm.exp.f64(double)
      4  declare double @llvm.exp2.f64(double)
      5  declare double @llvm.log.f64(double)
      6  declare double @llvm.log10.f64(double)
      7  declare double @llvm.log2.f64(double)
      8  declare double @llvm.pow.f64(double, double)
      9  declare double @llvm.powi.f64(double, i32)
     10  declare double @llvm.sin.f64(double)
     11  declare double @llvm.sqrt.f64(double)
    [all …]
|
/external/llvm/lib/Target/ARM/ |
D | ARMCallingConv.td |
     26  // Handle all vector types as either f64 or v2f64.
     27  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
     30  // f64 and v2f64 are passed in adjacent GPRs, possibly split onto the stack
     31  CCIfType<[f64, v2f64], CCCustom<"CC_ARM_APCS_Custom_f64">>,
     37  CCIfType<[f64], CCAssignToStack<8, 4>>,
     45  // Handle all vector types as either f64 or v2f64.
     46  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
     49  CCIfType<[f64, v2f64], CCCustom<"RetCC_ARM_APCS_Custom_f64">>,
     59  // Handle all vector types as either f64 or v2f64.
     60  CCIfType<[v1i64, v2i32, v4i16, v8i8, v2f32], CCBitConvertToType<f64>>,
    [all …]
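The comment on line 30 is the key fact: under the APCS rules an f64 argument travels as two GPR-sized words in adjacent registers, or is split onto the stack (line 37 shows the 8-byte, 4-aligned stack fallback). A small illustration of the two 32-bit halves involved; the actual register assignment is done by the custom handler named in the table, not by this code:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
       /* The two words CC_ARM_APCS_Custom_f64 would place in adjacent GPRs,
          shown for a little-endian host. */
       double d = 1.0;
       uint32_t w[2];
       memcpy(w, &d, sizeof d);
       printf("low word  = 0x%08x\n", (unsigned)w[0]);  /* 0x00000000 */
       printf("high word = 0x%08x\n", (unsigned)w[1]);  /* 0x3ff00000 for 1.0 */
       return 0;
    }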
|
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrVSX.td |
     76  [(set f64:$XT, (load xoaddr:$src))]>;
     98  [(store f64:$XT, xoaddr:$dst)]>;
    116  [(set f64:$XT, (fadd f64:$XA, f64:$XB))]>;
    120  [(set f64:$XT, (fmul f64:$XA, f64:$XB))]>;
    147  [(set f64:$XT, (fsub f64:$XA, f64:$XB))]>;
    164  [(set f64:$XT, (fma f64:$XA, f64:$XB, f64:$XTi))]>,
    180  [(set f64:$XT, (fma f64:$XA, f64:$XB, (fneg f64:$XTi)))]>,
    196  [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, f64:$XTi)))]>,
    212  [(set f64:$XT, (fneg (fma f64:$XA, f64:$XB, (fneg f64:$XTi))))]>,
    355  [(set f64:$XT, (fdiv f64:$XA, f64:$XB))]>;
    [all …]
|
/external/llvm/test/Transforms/InstSimplify/ |
D | fold-builtin-fma.ll |
      7  declare double @llvm.fma.f64(double, double, double)
     11  %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0.0)
     19  %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 5.0)
     27  %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0x7FF8000000000000)
     34  %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0xFFF8000000000000)
     42  %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0x7FF0000000000000)
     49  %1 = call double @llvm.fma.f64(double 7.0, double 8.0, double 0xFFF0000000000000)
     57  %1 = call double @llvm.fma.f64(double 0x7FF8000000000000, double 8.0, double 0.0)
     65  %1 = call double @llvm.fma.f64(double 7.0, double 0x7FF8000000000000, double 0.0)
     73  %1 = call double @llvm.fma.f64(double 0xFFF8000000000000, double 8.0, double 0.0)
    [all …]
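These calls feed InstSimplify only constant operands, so the test presumably expects each one to fold to a compile-time constant; the 0x7FF8.../0xFFF8... operands are quiet NaNs and 0x7FF0.../0xFFF0... are infinities. The same arithmetic written with C's fma, as a sketch of the expected folds rather than the test's CHECK lines (link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
       printf("fma(7, 8, 0)   = %g\n", fma(7.0, 8.0, 0.0));      /* 56 */
       printf("fma(7, 8, 5)   = %g\n", fma(7.0, 8.0, 5.0));      /* 61 */
       printf("fma(7, 8, NaN) = %g\n", fma(7.0, 8.0, NAN));      /* nan */
       printf("fma(7, 8, inf) = %g\n", fma(7.0, 8.0, INFINITY)); /* inf */
       return 0;
    }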
|
/external/llvm/test/CodeGen/SystemZ/ |
D | fp-sqrt-02.ll |
      5  declare double @llvm.sqrt.f64(double %f)
     13  %res = call double @llvm.sqrt.f64(double %val)
     23  %res = call double @llvm.sqrt.f64(double %val)
     34  %res = call double @llvm.sqrt.f64(double %val)
     47  %res = call double @llvm.sqrt.f64(double %val)
     59  %res = call double @llvm.sqrt.f64(double %val)
     72  %res = call double @llvm.sqrt.f64(double %val)
    100  %sqrt0 = call double @llvm.sqrt.f64(double %val0)
    101  %sqrt1 = call double @llvm.sqrt.f64(double %val1)
    102  %sqrt2 = call double @llvm.sqrt.f64(double %val2)
    [all …]
|
D | fp-round-01.ll |
     15  ; Test rint for f64.
     16  declare double @llvm.rint.f64(double %f)
     21  %res = call double @llvm.rint.f64(double %f)
     47  ; Test nearbyint for f64.
     48  declare double @llvm.nearbyint.f64(double %f)
     53  %res = call double @llvm.nearbyint.f64(double %f)
     70  ; Test floor for f64.
     71  declare double @llvm.floor.f64(double %f)
     76  %res = call double @llvm.floor.f64(double %f)
     93  ; Test ceil for f64.
    [all …]
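The intrinsics tested here correspond to the C99 functions of the same names. The practical difference between the first two is the inexact exception: rint may raise it, nearbyint must not; both honor the current rounding mode. A small sketch of the default-mode behavior (link with -lm):

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
       double f = 2.5;
       /* Default rounding mode: round to nearest, ties to even. */
       printf("rint(2.5)      = %g\n", rint(f));      /* 2 */
       printf("nearbyint(2.5) = %g\n", nearbyint(f)); /* 2 */
       printf("floor(2.5)     = %g\n", floor(f));     /* 2 */
       printf("ceil(2.5)      = %g\n", ceil(f));      /* 3 */
       return 0;
    }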
|
/external/valgrind/none/tests/amd64/ |
D | nan80and64.c |
     61  static void rev64 ( UChar* f64 )
     63     SWAPC( f64[0], f64[7] );
     64     SWAPC( f64[1], f64[6] );
     65     SWAPC( f64[2], f64[5] );
     66     SWAPC( f64[3], f64[4] );
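SWAPC is not among the hits; it is presumably a two-byte exchange macro, which makes rev64 a straight mirror of the 8-byte f64 image (little-endian to big-endian and back, handy when printing the most significant byte first). A self-contained sketch with an assumed SWAPC:

    typedef unsigned char UChar;

    /* Assumed definition of the swap helper used above. */
    #define SWAPC(x, y) do { UChar _t = (x); (x) = (y); (y) = _t; } while (0)

    static void rev64 ( UChar* f64 )
    {
       SWAPC( f64[0], f64[7] );
       SWAPC( f64[1], f64[6] );
       SWAPC( f64[2], f64[5] );
       SWAPC( f64[3], f64[4] );
    }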
|
/external/llvm/test/Transforms/BBVectorize/X86/ |
D | simple-int.ll |
      1  target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v…
      4  declare double @llvm.fma.f64(double, double, double)
      5  declare double @llvm.fmuladd.f64(double, double, double)
      6  declare double @llvm.cos.f64(double)
      7  declare double @llvm.powi.f64(double, i32)
     13  %Y1 = call double @llvm.fma.f64(double %X1, double %A1, double %C1)
     14  %Y2 = call double @llvm.fma.f64(double %X2, double %A2, double %C2)
     27  %Y1 = call double @llvm.fmuladd.f64(double %X1, double %A1, double %C1)
     28  %Y2 = call double @llvm.fmuladd.f64(double %X2, double %A2, double %C2)
     41  %Y1 = call double @llvm.cos.f64(double %X1)
    [all …]
|
/external/llvm/test/CodeGen/ARM/ |
D | fnmscs.ll |
     64  ; VFP2: vnmla.f64
     67  ; NEON: vnmla.f64
     70  ; A8U: vnmul.f64 d
     71  ; A8U: vsub.f64 d
     74  ; A8: vnmul.f64 d
     75  ; A8: vsub.f64 d
     85  ; VFP2: vnmla.f64
     88  ; NEON: vnmla.f64
     91  ; A8U: vnmul.f64 d
     92  ; A8U: vsub.f64 d
    [all …]
|
/external/llvm/test/CodeGen/AArch64/ |
D | arm64-cvt.ll |
     26  %tmp3 = call i32 @llvm.aarch64.neon.fcvtas.i32.f64(double %A)
     34  %tmp3 = call i64 @llvm.aarch64.neon.fcvtas.i64.f64(double %A)
     40  declare i32 @llvm.aarch64.neon.fcvtas.i32.f64(double) nounwind readnone
     41  declare i64 @llvm.aarch64.neon.fcvtas.i64.f64(double) nounwind readnone
     66  %tmp3 = call i32 @llvm.aarch64.neon.fcvtau.i32.f64(double %A)
     74  %tmp3 = call i64 @llvm.aarch64.neon.fcvtau.i64.f64(double %A)
     80  declare i32 @llvm.aarch64.neon.fcvtau.i32.f64(double) nounwind readnone
     81  declare i64 @llvm.aarch64.neon.fcvtau.i64.f64(double) nounwind readnone
    106  %tmp3 = call i32 @llvm.aarch64.neon.fcvtms.i32.f64(double %A)
    114  %tmp3 = call i64 @llvm.aarch64.neon.fcvtms.i64.f64(double %A)
    [all …]
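The suffix after fcvt names the rounding used for the float-to-integer conversion: a = to nearest with ties away from zero, m = toward minus infinity, n = to nearest with ties to even, and the i32/u variants pick signed or unsigned results. The first two can be approximated in portable C; this is a rough sketch of the rounding semantics under those assumptions, not the AArch64 intrinsics themselves (link with -lm):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* fcvtas-like: round to nearest, ties away from zero. */
    static int64_t cvt_ties_away(double x)        { return (int64_t)llround(x); }
    /* fcvtms-like: round toward minus infinity. */
    static int64_t cvt_toward_minus_inf(double x) { return (int64_t)floor(x); }

    int main(void)
    {
       printf("%lld %lld\n", (long long)cvt_ties_away(2.5),
                             (long long)cvt_toward_minus_inf(2.5));   /* 3 2 */
       printf("%lld %lld\n", (long long)cvt_ties_away(-2.5),
                             (long long)cvt_toward_minus_inf(-2.5));  /* -3 -3 */
       return 0;
    }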
|