/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_parser_test.cc |
    704 ENTRY %Fft (input: c64[8,32]) -> c64[8,32] { in CreateTestCases()
    705 %input = c64[8,32]{1,0} parameter(0) in CreateTestCases()
    706 ROOT %fft = c64[8,32]{1,0} fft(c64[8,32]{1,0} %input), fft_type=FFT, fft_length={32} in CreateTestCases()
    716 ENTRY %Ifft2d (input: c64[5,8,32]) -> c64[5,8,32] { in CreateTestCases()
    717 %input = c64[5,8,32]{2,1,0} parameter(0) in CreateTestCases()
    718 ROOT %fft = c64[5,8,32]{2,1,0} fft(c64[5,8,32]{2,1,0} %input), fft_type=IFFT, fft_length={8,32} in CreateTestCases()
    728 ENTRY %Rfft2d (input: f32[5,64,32]) -> c64[5,64,17] { in CreateTestCases()
    730 ROOT %fft = c64[5,64,17]{2,1,0} fft(f32[5,64,32]{2,1,0} %input), fft_type=RFFT, fft_length={64,32} in CreateTestCases()
    740 ENTRY %Irfft3d (input: c64[5,64,128,33]) -> f32[5,64,128,64] { in CreateTestCases()
    741 %input = c64[5,64,128,33]{3,2,1,0} parameter(0) in CreateTestCases()
    [all …]
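The %Rfft2d and %Irfft3d cases above show the real-FFT shape rule these parser tests round-trip: the innermost transformed dimension n of the real side becomes n/2 + 1 complex values (32 -> 17, 64 -> 33), and IRFFT inverts it. A minimal standalone sketch of that shape bookkeeping follows; the helper name and plain std::vector signature are illustrative only, not XLA's actual shape-inference API.

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

// Sketch of the RFFT output-shape rule visible in the test cases above:
// only the innermost transformed dimension changes, n -> n/2 + 1.
// Hypothetical helper; XLA's real shape inference lives elsewhere.
std::vector<int64_t> RfftOutputDims(std::vector<int64_t> input_dims,
                                    const std::vector<int64_t>& fft_length) {
  assert(!fft_length.empty() && fft_length.size() <= input_dims.size());
  input_dims.back() = fft_length.back() / 2 + 1;
  return input_dims;
}
// RfftOutputDims({5, 64, 32}, {64, 32}) == {5, 64, 17}, matching %Rfft2d;
// IRFFT reverses the rule, so 33 complex values reconstruct 64 real samples.
```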
|
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/ |
D | 2007-05-15-maskmovq.ll |
    6 define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
    9 %tmp6 = bitcast <1 x i64> %c64 to x86_mmx ; <x86_mmx> [#uses=1]
|
/external/llvm/test/CodeGen/X86/ |
D | 2007-05-15-maskmovq.ll |
    6 define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
    9 %tmp6 = bitcast <1 x i64> %c64 to x86_mmx ; <x86_mmx> [#uses=1]
|
/external/u-boot/arch/arm/dts/ |
D | uniphier-pro4-sanji.dts | 48 compatible = "st,24c64", "atmel,24c64", "i2c-eeprom";
|
D | uniphier-pro4-ace.dts | 53 compatible = "st,24c64", "atmel,24c64", "i2c-eeprom";
|
D | uniphier-pxs2-gentil.dts | 51 compatible = "st,24c64", "atmel,24c64", "i2c-eeprom";
|
D | uniphier-ld11-global.dts | 134 compatible = "st,24c64", "atmel,24c64", "i2c-eeprom";
|
D | imx6qdl-logicpd.dtsi |
    168 compatible = "atmel,24c64";
    175 compatible = "atmel,24c64";
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | 2007-05-15-maskmovq.ll |
    4 define void @test(<1 x i64> %c64, <1 x i64> %mask1, i8* %P) {
    29 %tmp6 = bitcast <1 x i64> %c64 to x86_mmx ; <x86_mmx> [#uses=1]
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | tensor.h |
    29 ? reinterpret_cast<std::complex<float>*>(tensor->data.c64) in GetTensorData()
    36 ? reinterpret_cast<const std::complex<float>*>(tensor->data.c64) in GetTensorData()
|
/external/tensorflow/tensorflow/lite/kernels/ |
D | cast.cc |
    91 copyCast(in, reinterpret_cast<std::complex<float>*>(out->data.c64), in copyToTensor()
    119 reinterpret_cast<std::complex<float>*>(input->data.c64), output, in Eval()
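The tensor.h and cast.cc hits both read the complex64 payload through reinterpret_cast<std::complex<float>*>(...->data.c64). That hinges on the C-side complex struct being two consecutive floats, which matches the array-of-two-floats layout std::complex<float> guarantees. Below is a self-contained sketch of the idea, using a stand-in Complex64 struct rather than the real TfLite type (the layout is an assumption stated in the comments).

```cpp
#include <complex>
#include <vector>

// Stand-in for the C-side complex type: two consecutive floats (re, im).
// The real TfLite struct is assumed to have this layout; it is copied here
// only to keep the sketch self-contained.
struct Complex64 {
  float re;
  float im;
};

static_assert(sizeof(Complex64) == sizeof(std::complex<float>),
              "expected identical size");

// View a buffer of C-style complex values as std::complex<float>, the same
// trick the kernels above apply to tensor->data.c64.
std::complex<float>* AsStdComplex(Complex64* data) {
  return reinterpret_cast<std::complex<float>*>(data);
}

int main() {
  std::vector<Complex64> buf = {{1.0f, 2.0f}, {3.0f, -4.0f}};
  std::complex<float>* c = AsStdComplex(buf.data());
  return (c[1].imag() == -4.0f) ? 0 : 1;  // sanity check on the layout
}
```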
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/SLPVectorizer/X86/ |
D | arith-mul.ll |
    11 @c64 = common global [8 x i64] zeroinitializer, align 64
    48 ; SSE-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    49 ; SSE-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    50 ; SSE-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    51 ; SSE-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    52 ; SSE-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    53 ; SSE-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    54 ; SSE-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    55 ; SSE-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    83 ; SLM-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    [all …]
|
D | shift-ashr.ll |
    11 @c64 = common global [8 x i64] zeroinitializer, align 64
    48 ; SSE-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    49 ; SSE-NEXT: store i64 [[R1]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    50 ; SSE-NEXT: store i64 [[R2]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    51 ; SSE-NEXT: store i64 [[R3]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    52 ; SSE-NEXT: store i64 [[R4]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    53 ; SSE-NEXT: store i64 [[R5]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    54 ; SSE-NEXT: store i64 [[R6]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    55 ; SSE-NEXT: store i64 [[R7]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i6…
    83 ; AVX1-NEXT: store i64 [[R0]], i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i…
    [all …]
|
D | arith-sub.ll |
    11 @c64 = common global [8 x i64] zeroinitializer, align 64
    36 ; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
    37 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    38 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    39 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    55 ; SLM-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
    56 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    57 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    58 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    68 ; AVX-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
    [all …]
|
D | arith-add.ll |
    11 @c64 = common global [8 x i64] zeroinitializer, align 64
    36 ; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
    37 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    38 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    39 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    55 ; SLM-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
    56 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    57 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    58 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    68 ; AVX-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align 8
    [all …]
|
D | shift-shl.ll |
    11 @c64 = common global [8 x i64] zeroinitializer, align 64
    36 ; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
    37 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    38 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    39 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    55 ; AVX1-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align…
    56 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    57 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    58 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    68 ; AVX2-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align…
    [all …]
|
D | shift-lshr.ll |
    11 @c64 = common global [8 x i64] zeroinitializer, align 64
    36 ; SSE-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align 8
    37 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    38 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    39 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    55 ; AVX1-NEXT: store <2 x i64> [[TMP9]], <2 x i64>* bitcast ([8 x i64]* @c64 to <2 x i64>*), align…
    56 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 2) to <…
    57 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 4) to <…
    58 …], <2 x i64>* bitcast (i64* getelementptr inbounds ([8 x i64], [8 x i64]* @c64, i32 0, i64 6) to <…
    68 ; AVX2-NEXT: store <4 x i64> [[TMP5]], <4 x i64>* bitcast ([8 x i64]* @c64 to <4 x i64>*), align…
    [all …]
|
/external/swiftshader/third_party/LLVM/unittests/VMCore/ |
D | InstructionsTest.cpp |
    121 const Constant* c64 = Constant::getNullValue(V8x64Ty); in TEST() local
    128 EXPECT_EQ(CastInst::Trunc, CastInst::getCastOpcode(c64, true, V8x8Ty, true)); in TEST()
|
/external/bouncycastle/repackaged/bcprov/src/main/java/com/android/org/bouncycastle/math/ |
D | Primes.java |
    504 long c64 = c & 0xFFFFFFFFL; in implSTRandomPrime() local
    505 if (isPrime32(c64)) in implSTRandomPrime()
    507 return new STOutput(BigInteger.valueOf(c64), primeSeed, primeGenCounter); in implSTRandomPrime()
|
/external/bouncycastle/bcprov/src/main/java/org/bouncycastle/math/ |
D | Primes.java |
    500 long c64 = c & 0xFFFFFFFFL; in implSTRandomPrime() local
    501 if (isPrime32(c64)) in implSTRandomPrime()
    503 return new STOutput(BigInteger.valueOf(c64), primeSeed, primeGenCounter); in implSTRandomPrime()
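In both copies of Primes.java, c & 0xFFFFFFFFL zero-extends a 32-bit value into a long so isPrime32 can treat it as an unsigned 32-bit prime candidate (Java ints are always signed). The same idiom written out in C++, as a small illustrative helper whose name is invented here:

```cpp
#include <cstdint>

// Java's `long c64 = c & 0xFFFFFFFFL` recovers the unsigned interpretation
// of a possibly negative 32-bit int. Equivalent C++ for illustration.
uint64_t ToUnsigned32(int32_t c) {
  return static_cast<uint64_t>(c) & 0xFFFFFFFFull;
}
// Example: ToUnsigned32(-1) == 0xFFFFFFFFull.
```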
|
/external/protobuf/src/google/protobuf/stubs/ |
D | int128.h |
    348 uint64 c64 = a64 * b00 + a32 * b32 + a00 * b64; variable
    349 this->hi_ = (c96 << 32) + c64;
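The int128.h hit is one row of a limb-based multiply: each operand is split into 32-bit limbs (a00, a32, a64, ...), and the partial products that land at bit 64 are summed into c64 before being folded into hi_. A self-contained sketch of the same technique for a 64x64 -> 128-bit multiply; mul64x64 is an illustrative helper, not the protobuf code:

```cpp
#include <cstdint>

// Portable 64x64 -> 128-bit multiply using 32-bit limbs, the same
// partial-product bookkeeping as the uint128 snippet above.
void mul64x64(uint64_t a, uint64_t b, uint64_t& hi, uint64_t& lo) {
  const uint64_t mask = 0xFFFFFFFFu;
  uint64_t a_lo = a & mask, a_hi = a >> 32;
  uint64_t b_lo = b & mask, b_hi = b >> 32;

  uint64_t p0 = a_lo * b_lo;  // contributes at bit 0
  uint64_t p1 = a_lo * b_hi;  // contributes at bit 32
  uint64_t p2 = a_hi * b_lo;  // contributes at bit 32
  uint64_t p3 = a_hi * b_hi;  // contributes at bit 64

  // Sum everything that lands in bits 32..63, keeping the carry.
  uint64_t mid = (p0 >> 32) + (p1 & mask) + (p2 & mask);
  lo = (p0 & mask) | (mid << 32);
  hi = p3 + (p1 >> 32) + (p2 >> 32) + (mid >> 32);
}
// Example: mul64x64(0xFFFFFFFFFFFFFFFFull, 2, hi, lo)
// yields hi == 1, lo == 0xFFFFFFFFFFFFFFFEull.
```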
|
/external/clang/test/Sema/ |
D | attr-mode.c | 34 typedef _Complex float c64 __attribute((mode(DC)));
|
/external/tcpdump/tests/ |
D | icmpv6.out | 13 0x0020: 0464 6f6d 3104 646f 6d32 0374 6c64 0000
|
D | print-xx.out |
    82 0x0260: 4954 4c45 3e50 6c61 6365 686f 6c64 6572
    205 0x0a10: 6f75 6c64 2062 6520 756e 6465 7220 3c54
    278 0x0ea0: 3e57 6f72 6c64 0a57 6964 6520 5765 6220
    307 0x1070: 6974 2077 6f75 6c64 2067 6976 6520 4465
|
D | print-x.out |
    76 0x0250: 3c54 4954 4c45 3e50 6c61 6365 686f 6c64
    199 0x0a00: 7368 6f75 6c64 2062 6520 756e 6465 7220
    272 0x0e90: 2f22 3e57 6f72 6c64 0a57 6964 6520 5765
    301 0x1060: 6420 6974 2077 6f75 6c64 2067 6976 6520
|