/external/libaom/libaom/aom_dsp/simd/ |
D | v64_intrinsics.h |
    22  typedef c_v64 v64;  typedef
    24  SIMD_INLINE uint32_t v64_low_u32(v64 a) { return c_v64_low_u32(a); }  in v64_low_u32()
    25  SIMD_INLINE uint32_t v64_high_u32(v64 a) { return c_v64_high_u32(a); }  in v64_high_u32()
    26  SIMD_INLINE int32_t v64_low_s32(v64 a) { return c_v64_low_s32(a); }  in v64_low_s32()
    27  SIMD_INLINE int32_t v64_high_s32(v64 a) { return c_v64_high_s32(a); }  in v64_high_s32()
    28  SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {  in v64_from_32()
    31  SIMD_INLINE v64 v64_from_64(uint64_t x) { return c_v64_from_64(x); }  in v64_from_64()
    32  SIMD_INLINE uint64_t v64_u64(v64 x) { return c_v64_u64(x); }  in v64_u64()
    33  SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {  in v64_from_16()
    50  SIMD_INLINE v64 v64_load_unaligned(const void *p) {  in v64_load_unaligned()
    [all …]
|
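The entry above is the portable fallback layer: v64 is just the plain-C c_v64 type and every v64_* call forwards to a c_v64_* implementation. Below is a minimal sketch of how such a 64-bit vector type can be modeled as a union of lane views, assuming a little-endian host and an (x, y) = (high, low) convention for from_32; the my_* names are illustrative, not the actual libaom definitions.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for c_v64: eight bytes viewed at several lane widths. */
typedef union {
  uint64_t u64;
  uint32_t u32[2];
  uint16_t u16[4];
  uint8_t  u8[8];
} my_v64;

/* Assumed convention: first argument is the high 32 bits, second the low. */
static my_v64 my_v64_from_32(uint32_t x, uint32_t y) {
  my_v64 t;
  t.u32[1] = x;  /* high half on a little-endian host */
  t.u32[0] = y;  /* low half */
  return t;
}

static uint32_t my_v64_low_u32(my_v64 a) { return a.u32[0]; }
static uint32_t my_v64_high_u32(my_v64 a) { return a.u32[1]; }
static uint64_t my_v64_u64(my_v64 a) { return a.u64; }

int main(void) {
  my_v64 v = my_v64_from_32(0xdeadbeefu, 0x01234567u);
  printf("u64=%016llx low=%08x high=%08x\n",
         (unsigned long long)my_v64_u64(v), my_v64_low_u32(v), my_v64_high_u32(v));
  return 0;
}
```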
D | v64_intrinsics_arm.h |
    24  typedef int64x1_t v64;  typedef
    26  SIMD_INLINE uint32_t v64_low_u32(v64 a) {  in v64_low_u32()
    30  SIMD_INLINE uint32_t v64_high_u32(v64 a) {  in v64_high_u32()
    34  SIMD_INLINE int32_t v64_low_s32(v64 a) {  in v64_low_s32()
    38  SIMD_INLINE int32_t v64_high_s32(v64 a) {  in v64_high_s32()
    42  SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {  in v64_from_16()
    47  SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {  in v64_from_32()
    51  SIMD_INLINE v64 v64_from_64(uint64_t x) { return vcreate_s64(x); }  in v64_from_64()
    53  SIMD_INLINE uint64_t v64_u64(v64 x) { return (uint64_t)x; }  in v64_u64()
    81  SIMD_INLINE v64 v64_load_aligned(const void *p) {  in v64_load_aligned()
    [all …]
|
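On ARM the same API is carried in a single int64x1_t NEON register (see the typedef above), so 32-bit lanes come back out through a reinterpret plus a lane extract. A self-contained sketch of that pattern using standard NEON intrinsics; the my_* helpers are illustrative and build only for an ARM target.

```c
#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit vector carried in one NEON D register. */
static int64x1_t my_v64_from_64(uint64_t x) { return vcreate_s64(x); }

static uint32_t my_v64_low_u32(int64x1_t a) {
  /* View the 64-bit lane as two 32-bit lanes and pull out lane 0. */
  return vget_lane_u32(vreinterpret_u32_s64(a), 0);
}

static uint32_t my_v64_high_u32(int64x1_t a) {
  return vget_lane_u32(vreinterpret_u32_s64(a), 1);
}

int main(void) {
  int64x1_t v = my_v64_from_64(0x1122334455667788ULL);
  printf("low=%08x high=%08x\n", my_v64_low_u32(v), my_v64_high_u32(v));
  return 0;
}
```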
D | v64_intrinsics_x86.h |
    23  typedef __m128i v64;  typedef
    25  SIMD_INLINE uint32_t v64_low_u32(v64 a) {  in v64_low_u32()
    29  SIMD_INLINE uint32_t v64_high_u32(v64 a) {  in v64_high_u32()
    33  SIMD_INLINE int32_t v64_low_s32(v64 a) { return (int32_t)_mm_cvtsi128_si32(a); }  in v64_low_s32()
    35  SIMD_INLINE int32_t v64_high_s32(v64 a) {  in v64_high_s32()
    39  SIMD_INLINE v64 v64_from_16(uint16_t a, uint16_t b, uint16_t c, uint16_t d) {  in v64_from_16()
    45  SIMD_INLINE v64 v64_from_32(uint32_t x, uint32_t y) {  in v64_from_32()
    49  SIMD_INLINE v64 v64_from_64(uint64_t x) {  in v64_from_64()
    57  SIMD_INLINE uint64_t v64_u64(v64 x) {  in v64_u64()
    77  SIMD_INLINE v64 v64_load_aligned(const void *p) {  in v64_load_aligned()
    [all …]
|
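The x86 variant keeps the 64-bit vector in the low eight bytes of an __m128i, so lane extraction uses SSE2 moves and byte shifts. A hedged sketch of that representation; the my_* helpers are illustrative and may not match the exact libaom bodies.

```c
#include <emmintrin.h>  /* SSE2 */
#include <stdint.h>
#include <stdio.h>

/* 64-bit vector carried in the low half of an XMM register. */
typedef __m128i my_v64;

static my_v64 my_v64_from_32(uint32_t x, uint32_t y) {
  /* _mm_set_epi32(e3, e2, e1, e0): x lands in bits 63..32, y in bits 31..0. */
  return _mm_set_epi32(0, 0, (int)x, (int)y);
}

static uint32_t my_v64_low_u32(my_v64 a) {
  return (uint32_t)_mm_cvtsi128_si32(a);                     /* bits 31..0 */
}

static uint32_t my_v64_high_u32(my_v64 a) {
  return (uint32_t)_mm_cvtsi128_si32(_mm_srli_si128(a, 4));  /* bits 63..32 */
}

int main(void) {
  my_v64 v = my_v64_from_32(0xdeadbeefu, 0x01234567u);
  printf("low=%08x high=%08x\n", my_v64_low_u32(v), my_v64_high_u32(v));
  return 0;
}
```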
D | v128_intrinsics_c.h |
    31  c_v64 v64[2];  member
    36  SIMD_INLINE c_v64 c_v128_low_v64(c_v128 a) { return a.v64[0]; }  in c_v128_low_v64()
    38  SIMD_INLINE c_v64 c_v128_high_v64(c_v128 a) { return a.v64[1]; }  in c_v128_high_v64()
    49  t.v64[1] = hi;  in c_v128_from_v64()
    50  t.v64[0] = lo;  in c_v128_from_v64()
    104  t.v64[1] = t.v64[0] = c_v64_dup_8(x);  in c_v128_dup_8()
    110  t.v64[1] = t.v64[0] = c_v64_dup_16(x);  in c_v128_dup_16()
    116  t.v64[1] = t.v64[0] = c_v64_dup_32(x);  in c_v128_dup_32()
    127  return c_v64_dotp_su8(a.v64[1], b.v64[1]) +  in c_v128_dotp_su8()
    128  c_v64_dotp_su8(a.v64[0], b.v64[0]);  in c_v128_dotp_su8()
    [all …]
|
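In the plain-C 128-bit layer above, c_v128 holds a two-element array of c_v64, so constructors and dup operations are written once per half. A rough sketch of that composition; the types are simplified and the real c_v128 union exposes more lane views.

```c
#include <stdint.h>
#include <string.h>
#include <stdio.h>

typedef union { uint64_t u64; uint8_t u8[8]; } my_v64;
typedef struct { my_v64 v64[2]; } my_v128;   /* v64[0] = low half, v64[1] = high half */

static my_v64 my_v64_dup_8(uint8_t x) {
  my_v64 t;
  memset(t.u8, x, sizeof(t.u8));   /* replicate the byte across all 8 lanes */
  return t;
}

static my_v128 my_v128_from_v64(my_v64 hi, my_v64 lo) {
  my_v128 t;
  t.v64[1] = hi;
  t.v64[0] = lo;
  return t;
}

static my_v128 my_v128_dup_8(uint8_t x) {
  /* Build the 128-bit value by duplicating into both 64-bit halves. */
  my_v64 h = my_v64_dup_8(x);
  return my_v128_from_v64(h, h);
}

int main(void) {
  my_v128 v = my_v128_dup_8(0xab);
  printf("low=%016llx high=%016llx\n",
         (unsigned long long)v.v64[0].u64, (unsigned long long)v.v64[1].u64);
  return 0;
}
```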
D | v128_intrinsics.h |
    27  SIMD_INLINE v64 v128_low_v64(v128 a) { return c_v128_low_v64(a); }  in v128_low_v64()
    28  SIMD_INLINE v64 v128_high_v64(v128 a) { return c_v128_high_v64(a); }  in v128_high_v64()
    32  SIMD_INLINE v128 v128_from_v64(v64 hi, v64 lo) {  in v128_from_v64()
    115  SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) { return c_v128_mul_s16(a, b); }  in v128_mul_s16()
    156  SIMD_INLINE v128 v128_zip_8(v64 a, v64 b) { return c_v128_zip_8(a, b); }  in v128_zip_8()
    157  SIMD_INLINE v128 v128_zip_16(v64 a, v64 b) { return c_v128_zip_16(a, b); }  in v128_zip_16()
    158  SIMD_INLINE v128 v128_zip_32(v64 a, v64 b) { return c_v128_zip_32(a, b); }  in v128_zip_32()
    177  SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) { return c_v128_unpack_u8_s16(a); }  in v128_unpack_u8_s16()
    184  SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) { return c_v128_unpack_s8_s16(a); }  in v128_unpack_s8_s16()
    203  SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) { return c_v128_unpack_u16_s32(a); }  in v128_unpack_u16_s32()
    [all …]
|
D | v128_intrinsics_x86.h |
    24  SIMD_INLINE v64 v128_low_v64(v128 a) {  in v128_low_v64()
    28  SIMD_INLINE v64 v128_high_v64(v128 a) { return _mm_srli_si128(a, 8); }  in v128_high_v64()
    30  SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) {  in v128_from_v64()
    178  SIMD_INLINE v128 v128_zip_8(v64 a, v64 b) { return _mm_unpacklo_epi8(b, a); }  in v128_zip_8()
    180  SIMD_INLINE v128 v128_zip_16(v64 a, v64 b) { return _mm_unpacklo_epi16(b, a); }  in v128_zip_16()
    182  SIMD_INLINE v128 v128_zip_32(v64 a, v64 b) { return _mm_unpacklo_epi32(b, a); }  in v128_zip_32()
    230  SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {  in v128_unpack_u8_s16()
    242  SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) {  in v128_unpack_s8_s16()
    275  SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {  in v128_unpack_u16_s32()
    279  SIMD_INLINE v128 v128_unpack_s16_s32(v64 a) {  in v128_unpack_s16_s32()
    [all …]
|
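The x86 zip operations above map straight onto SSE2 unpack instructions: _mm_unpacklo_epi8(x, y) interleaves the low eight bytes of its operands starting with x's byte 0, which is why the snippet swaps the arguments so that b's lanes come first. A minimal stand-alone demonstration of the underlying intrinsic (not libaom code):

```c
#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Two vectors whose low 8 bytes are 0x00..0x07 and 0x10..0x17. */
  __m128i a = _mm_set_epi8(15, 14, 13, 12, 11, 10, 9, 8,
                           7, 6, 5, 4, 3, 2, 1, 0);
  __m128i b = _mm_set_epi8(0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
                           0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10);

  /* Interleave the low halves: result bytes are a0,b0,a1,b1,...,a7,b7. */
  __m128i z = _mm_unpacklo_epi8(a, b);

  uint8_t out[16];
  _mm_storeu_si128((__m128i *)out, z);
  for (int i = 0; i < 16; ++i) printf("%02x ", out[i]);   /* 00 10 01 11 ... */
  printf("\n");
  return 0;
}
```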
D | v128_intrinsics_arm.h |
    25  SIMD_INLINE v64 v128_low_v64(v128 a) { return vget_low_s64(a); }  in v128_low_v64()
    27  SIMD_INLINE v64 v128_high_v64(v128 a) { return vget_high_s64(a); }  in v128_high_v64()
    29  SIMD_INLINE v128 v128_from_v64(v64 a, v64 b) { return vcombine_s64(b, a); }  in v128_from_v64()
    275  SIMD_INLINE v128 v128_mul_s16(v64 a, v64 b) {  in v128_mul_s16()
    433  SIMD_INLINE v128 v128_zip_8(v64 x, v64 y) {  in v128_zip_8()
    458  SIMD_INLINE v128 v128_zip_16(v64 x, v64 y) {  in v128_zip_16()
    483  SIMD_INLINE v128 v128_zip_32(v64 x, v64 y) {  in v128_zip_32()
    561  SIMD_INLINE v128 v128_unpack_u8_s16(v64 a) {  in v128_unpack_u8_s16()
    573  SIMD_INLINE v128 v128_unpack_s8_s16(v64 a) {  in v128_unpack_s8_s16()
    609  SIMD_INLINE v128 v128_unpack_u16_s32(v64 a) {  in v128_unpack_u16_s32()
    [all …]
|
D | v256_intrinsics_c.h |
    31  c_v64 v64[4];  member
    37  SIMD_INLINE c_v64 c_v256_low_v64(c_v256 a) { return a.v64[0]; }  in c_v256_low_v64()
    111  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_8(x);  in c_v256_dup_8()
    117  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_16(x);  in c_v256_dup_16()
    123  t.v64[3] = t.v64[2] = t.v64[1] = t.v64[0] = c_v64_dup_32(x);  in c_v256_dup_32()
|
/external/clang/test/CodeGen/ |
D | builtins-hexagon.c |
    7  int v64 __attribute__((__vector_size__(256)));  in foo() local
    1761  __builtin_HEXAGON_V6_hi_128B(v64);  in foo()
    1765  __builtin_HEXAGON_V6_lo_128B(v64);  in foo()
    1835  __builtin_HEXAGON_V6_vaddb_dv_128B(v64, v64);  in foo()
    1851  __builtin_HEXAGON_V6_vaddh_dv_128B(v64, v64);  in foo()
    1865  __builtin_HEXAGON_V6_vaddhsat_dv_128B(v64, v64);  in foo()
    1883  __builtin_HEXAGON_V6_vaddubsat_dv_128B(v64, v64);  in foo()
    1891  __builtin_HEXAGON_V6_vadduhsat_dv_128B(v64, v64);  in foo()
    1903  __builtin_HEXAGON_V6_vaddw_dv_128B(v64, v64);  in foo()
    1917  __builtin_HEXAGON_V6_vaddwsat_dv_128B(v64, v64);  in foo()
    [all …]
|
D | hexagon-inline-asm.c |
    3  typedef int v64 __attribute__((__vector_size__(64)))  typedef
    8  void foo(v64 v0, v64 v1, v64 *p) {  in foo()
    10  v64 q0;  in foo()
|
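The Hexagon test above leans on the GCC/Clang vector_size extension: the typedef declares a 64-byte vector of int that supports element-wise operators and subscripting without any target intrinsics. A small generic example of the extension (not Hexagon-specific):

```c
#include <stdio.h>

/* 64-byte vector of int: 16 lanes when int is 4 bytes. */
typedef int v64 __attribute__((__vector_size__(64)));

int main(void) {
  v64 a, b, c;
  for (int i = 0; i < 16; ++i) {
    a[i] = i;
    b[i] = 100 * i;
  }
  c = a + b;   /* element-wise add, no intrinsics needed */
  printf("c[3]=%d c[15]=%d\n", c[3], c[15]);
  return 0;
}
```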
/external/lzma/CPP/Windows/ |
D | TimeUtils.cpp |
    59  UInt64 v64 = ft.dwLowDateTime | ((UInt64)ft.dwHighDateTime << 32);  in FileTimeToDosTime()
    63  v64 += (kNumTimeQuantumsInSecond * 2 - 1);  in FileTimeToDosTime()
    64  v64 /= kNumTimeQuantumsInSecond;  in FileTimeToDosTime()
    65  sec = (unsigned)(v64 % 60);  in FileTimeToDosTime()
    66  v64 /= 60;  in FileTimeToDosTime()
    67  min = (unsigned)(v64 % 60);  in FileTimeToDosTime()
    68  v64 /= 60;  in FileTimeToDosTime()
    69  hour = (unsigned)(v64 % 24);  in FileTimeToDosTime()
    70  v64 /= 24;  in FileTimeToDosTime()
    72  v = (UInt32)v64;  in FileTimeToDosTime()
|
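FILETIME stores 100-nanosecond quantums since 1601-01-01, so kNumTimeQuantumsInSecond is presumably 10,000,000; after the rounding bias on line 63 (DOS timestamps have 2-second resolution) the code peels off seconds, minutes, and hours by repeated division. A standalone sketch of the same arithmetic, omitting the rounding bias and the later day/month/year split; the function and constant names here are illustrative.

```c
#include <stdint.h>
#include <stdio.h>

#define QUANTUMS_PER_SECOND 10000000ULL   /* FILETIME ticks are 100 ns */

/* Split a FILETIME-style tick count into time-of-day fields plus whole days
 * since 1601-01-01, mirroring the shape of the conversion above. */
static void ticks_to_fields(uint32_t lo, uint32_t hi,
                            unsigned *sec, unsigned *min, unsigned *hour,
                            uint64_t *days) {
  uint64_t v64 = lo | ((uint64_t)hi << 32);
  v64 /= QUANTUMS_PER_SECOND;    /* now whole seconds since 1601-01-01 */
  *sec = (unsigned)(v64 % 60);   v64 /= 60;
  *min = (unsigned)(v64 % 60);   v64 /= 60;
  *hour = (unsigned)(v64 % 24);  v64 /= 24;
  *days = v64;
}

int main(void) {
  unsigned s, m, h;
  uint64_t d;
  uint64_t ft = 0x01d0000000000000ULL;   /* arbitrary plausible FILETIME value */
  ticks_to_fields((uint32_t)ft, (uint32_t)(ft >> 32), &s, &m, &h, &d);
  printf("%llu days, %02u:%02u:%02u\n", (unsigned long long)d, h, m, s);
  return 0;
}
```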
/external/strace/tests-mx32/ |
D | ioctl_mtd.c |
    105  TAIL_ALLOC_OBJECT_CONST_PTR(uint64_t, v64);  in main()
    106  fill_memory(v64, sizeof(*v64));  in main()
    108  ioctl(-1, MEMGETBADBLOCK, v64);  in main()
    111  (unsigned int) _IOC_NR(MEMGETBADBLOCK), *v64);  in main()
    113  ioctl(-1, MEMSETBADBLOCK, v64);  in main()
    116  (unsigned int) _IOC_NR(MEMSETBADBLOCK), *v64);  in main()
|
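The strace test above only checks argument decoding (the fd is -1), but the ioctls themselves take a pointer to a 64-bit byte offset of an eraseblock: MEMGETBADBLOCK returns 1 if the block is marked bad and 0 if not, while MEMSETBADBLOCK marks it bad. A hedged usage sketch against a real MTD character device; /dev/mtd0 is a hypothetical node and error handling is kept minimal.

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/mtd-user.h>   /* MEMGETBADBLOCK, MEMSETBADBLOCK */

int main(void) {
  int fd = open("/dev/mtd0", O_RDWR);   /* hypothetical MTD device node */
  if (fd < 0) { perror("open"); return 1; }

  long long offset = 0;   /* byte offset of the eraseblock (kernel expects 64 bits) */
  int r = ioctl(fd, MEMGETBADBLOCK, &offset);
  if (r < 0)
    perror("MEMGETBADBLOCK");
  else
    printf("block at offset %lld is %s\n", offset, r ? "bad" : "good");

  close(fd);
  return 0;
}
```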
/external/strace/tests/ |
D | ioctl_mtd.c |
    105  TAIL_ALLOC_OBJECT_CONST_PTR(uint64_t, v64);  in main()
    106  fill_memory(v64, sizeof(*v64));  in main()
    108  ioctl(-1, MEMGETBADBLOCK, v64);  in main()
    111  (unsigned int) _IOC_NR(MEMGETBADBLOCK), *v64);  in main()
    113  ioctl(-1, MEMSETBADBLOCK, v64);  in main()
    116  (unsigned int) _IOC_NR(MEMSETBADBLOCK), *v64);  in main()
|
/external/strace/tests-m32/ |
D | ioctl_mtd.c |
    105  TAIL_ALLOC_OBJECT_CONST_PTR(uint64_t, v64);  in main()
    106  fill_memory(v64, sizeof(*v64));  in main()
    108  ioctl(-1, MEMGETBADBLOCK, v64);  in main()
    111  (unsigned int) _IOC_NR(MEMGETBADBLOCK), *v64);  in main()
    113  ioctl(-1, MEMSETBADBLOCK, v64);  in main()
    116  (unsigned int) _IOC_NR(MEMSETBADBLOCK), *v64);  in main()
|
/external/libaom/libaom/test/ |
D | simd_cmp_impl.h |
    41  v64 imm_v64_shl_n_byte(v64 a) {  in imm_v64_shl_n_byte()
    45  v64 imm_v64_shr_n_byte(v64 a) {  in imm_v64_shr_n_byte()
    49  v64 imm_v64_shl_n_8(v64 a) {  in imm_v64_shl_n_8()
    53  v64 imm_v64_shr_n_u8(v64 a) {  in imm_v64_shr_n_u8()
    57  v64 imm_v64_shr_n_s8(v64 a) {  in imm_v64_shr_n_s8()
    61  v64 imm_v64_shl_n_16(v64 a) {  in imm_v64_shl_n_16()
    65  v64 imm_v64_shr_n_u16(v64 a) {  in imm_v64_shr_n_u16()
    69  v64 imm_v64_shr_n_s16(v64 a) {  in imm_v64_shr_n_s16()
    73  v64 imm_v64_shl_n_32(v64 a) {  in imm_v64_shl_n_32()
    77  v64 imm_v64_shr_n_u32(v64 a) {  in imm_v64_shr_n_u32()
    [all …]
|
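These wrappers exist because the v64_*_n_* shift intrinsics take their shift count as a compile-time immediate, so the test defines one wrapper per constant instead of passing a runtime value (how the real file generates them is its own machinery, not shown here). A hedged C sketch of the same idea, using a macro to stamp out per-immediate wrappers; my_v64_shl_n_8 is only a scalar stand-in for a lane-wise 8-bit shift.

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for an intrinsic whose shift count must be a compile-time constant:
 * shift every byte lane of a 64-bit value left by imm, masking cross-lane bits. */
#define my_v64_shl_n_8(a, imm) \
  (((a) << (imm)) & (0x0101010101010101ULL * ((0xffu << (imm)) & 0xffu)))

/* Stamp out one fixed-immediate wrapper per shift value, so a test harness can
 * exercise each instantiation separately. */
#define DEFINE_IMM_SHL(imm)                        \
  static uint64_t imm_shl_n_8_##imm(uint64_t a) {  \
    return my_v64_shl_n_8(a, imm);                 \
  }

DEFINE_IMM_SHL(1)
DEFINE_IMM_SHL(4)
DEFINE_IMM_SHL(7)

int main(void) {
  uint64_t x = 0x0102030405060708ULL;
  printf("%016llx %016llx %016llx\n",
         (unsigned long long)imm_shl_n_8_1(x),
         (unsigned long long)imm_shl_n_8_4(x),
         (unsigned long long)imm_shl_n_8_7(x));
  return 0;
}
```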
/external/perf_data_converter/src/quipper/ |
D | sample_info_reader_test.cc |
    41  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
    103  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
    162  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
    223  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
    275  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
    315  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
    353  PunU32U64{.v32 = {0x68d, 0x68e}}.v64,  // TID (u32 pid, tid)  in TEST()
|
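perf packs PID and TID into a single u64 sample field, so the test builds that field by punning two u32 values through a union. A minimal sketch of the PunU32U64 pattern; the member layout is assumed from the initializer above, and the resulting byte order depends on the host.

```c
#include <stdint.h>
#include <stdio.h>

/* Two 32-bit values viewed as one 64-bit sample field. */
typedef union {
  uint32_t v32[2];   /* {pid, tid} in the test above */
  uint64_t v64;
} PunU32U64;

int main(void) {
  PunU32U64 p = { .v32 = { 0x68d, 0x68e } };
  /* On a little-endian host the first u32 occupies the low half of v64. */
  printf("v64 = 0x%016llx\n", (unsigned long long)p.v64);
  return 0;
}
```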
/external/lzma/C/Util/7z/ |
D | 7zMain.c |
    313  UInt64 v64 = nt->Low | ((UInt64)nt->High << 32);  in ConvertFileTimeToString() local
    314  v64 /= 10000000;  in ConvertFileTimeToString()
    315  sec = (unsigned)(v64 % 60); v64 /= 60;  in ConvertFileTimeToString()
    316  min = (unsigned)(v64 % 60); v64 /= 60;  in ConvertFileTimeToString()
    317  hour = (unsigned)(v64 % 24); v64 /= 24;  in ConvertFileTimeToString()
    319  v = (UInt32)v64;  in ConvertFileTimeToString()
|
/external/swiftshader/third_party/subzero/tests_lit/reader_tests/ |
D | casts.ll |
    75  %v64 = zext i1 %v to i64
    76  ret i64 %v64
    93  %v64 = zext i8 %v to i64
    109  %v64 = zext i16 %v to i64
    110  ret i64 %v64
    122  %v64 = zext i32 %v to i64
    123  ret i64 %v64
    137  %v64 = sext i1 %v to i64
    155  %v64 = sext i8 %v to i64
    156  ret i64 %v64
    [all …]
|
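These IR checks pin down that zext fills the widened bits with zeros while sext replicates the sign bit. The same behavior falls out of ordinary C integer conversions, which is what front ends lower to this kind of IR; a small example:

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int8_t v = -5;                       /* 0xfb as a byte */

  /* zext i8 -> i64: go through the unsigned 8-bit type first. */
  uint64_t z = (uint64_t)(uint8_t)v;   /* 0x00000000000000fb */

  /* sext i8 -> i64: converting the signed type replicates the sign bit. */
  int64_t s = (int64_t)v;              /* 0xfffffffffffffffb */

  printf("zext: 0x%016llx\nsext: 0x%016llx\n",
         (unsigned long long)z, (unsigned long long)s);
  return 0;
}
```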
/external/llvm/test/Transforms/InstCombine/ |
D | alloca.ll |
    1  …:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"…
    2  …:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"…
    158  ; ALL: %v64 = alloca i1, align 8
    161  %v64 = alloca i1, i64 1, align 8
    163  call void (...) @use(i1* %v32, i1* %v64, i1* %v33)
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Transforms/InstCombine/ |
D | alloca.ll |
    1  …:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"…
    2  …:32-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-v64:64:64-v128:128:128"…
    161  ; ALL: %v64 = alloca i1, align 8
    164  %v64 = alloca i1, i64 1, align 8
    166  call void (...) @use(i1* %v32, i1* %v64, i1* %v33)
|
/external/llvm/test/TableGen/ |
D | TwoLevelName.td | 29 defm v64#NAME : OT1<!strconcat("v64", ss), 64, w>;
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/TableGen/ |
D | TwoLevelName.td | 29 defm v64#NAME : OT1<!strconcat("v64", ss), 64, w>;
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/ |
D | unaligned.ll |
    2  …p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-…
    4  …p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-…
|
/external/llvm/test/CodeGen/PowerPC/ |
D | unaligned.ll |
    2  …p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-…
    4  …p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-…
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | swp-disable-Os.ll |
    78  %v64 = add i32 %v59, %v63
    79  %v65 = add i32 %v60, %v64
    83  %v69 = add i32 %v64, %v68
|