Searched refs:unaligned_load (Results 1 – 4 of 4) sorted by relevance
/external/skqp/src/opts/
D | SkChecksum_opts.h |
      23  static inline T unaligned_load(const P* p) {                        in unaligned_load() function
      45  a = _mm_crc32_u64(a, unaligned_load<uint64_t>(data+ 0));            in hash_fn()
      46  b = _mm_crc32_u64(b, unaligned_load<uint64_t>(data+ 8));            in hash_fn()
      47  c = _mm_crc32_u64(c, unaligned_load<uint64_t>(data+16));            in hash_fn()
      56  hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));         in hash_fn()
      63  hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));         in hash_fn()
      72  hash32 = _mm_crc32_u32(hash32, unaligned_load<uint32_t>(data));     in hash_fn()
      76  hash32 = _mm_crc32_u16(hash32, unaligned_load<uint16_t>(data));     in hash_fn()
      80  hash32 = _mm_crc32_u8(hash32, unaligned_load<uint8_t>(data));       in hash_fn()
      99  a = _mm_crc32_u32(a, unaligned_load<uint32_t>(data+0));             in hash_fn()
    [all …]
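For context, a minimal sketch of the pattern these hits show: unaligned_load copies bytes with memcpy so the SSE4.2 CRC32 intrinsics in hash_fn() can consume input at any alignment. The body of unaligned_load and the surrounding loop are truncated in the results above, so the seed handling and tail loop below are assumptions, not the file's exact code.

    // Sketch only; the memcpy body and the tail handling are assumed, not copied from the file.
    #include <immintrin.h>   // _mm_crc32_u64 / _u32 / _u16 / _u8 (needs SSE4.2, e.g. -msse4.2)
    #include <cstdint>
    #include <cstring>

    template <typename T, typename P>
    static inline T unaligned_load(const P* p) {
        T v;
        std::memcpy(&v, p, sizeof(v));   // well-defined load from a possibly unaligned address
        return v;
    }

    // Feed 8 bytes at a time into the hardware CRC32, then mop up the tail,
    // mirroring the hash_fn() hits at lines 56-80 above.
    static uint32_t crc32c_hash(const uint8_t* data, size_t n, uint32_t seed) {
        uint64_t hash = seed;
        while (n >= 8) {
            hash = _mm_crc32_u64(hash, unaligned_load<uint64_t>(data));
            data += 8; n -= 8;
        }
        uint32_t hash32 = (uint32_t)hash;
        if (n >= 4) { hash32 = _mm_crc32_u32(hash32, unaligned_load<uint32_t>(data)); data += 4; n -= 4; }
        if (n >= 2) { hash32 = _mm_crc32_u16(hash32, unaligned_load<uint16_t>(data)); data += 2; n -= 2; }
        if (n >= 1) { hash32 = _mm_crc32_u8(hash32, unaligned_load<uint8_t>(data)); }
        return hash32;
    }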
D | SkRasterPipeline_opts.h |
      22  SI T unaligned_load(const P* p) {  // const void* would work too, but const P* helps ARMv7 codegen.   in unaligned_load() function
      36  return unaligned_load<Dst>(&src);                                   in bit_cast()
     325  return unaligned_load<U8>(&r);
     531  return unaligned_load<U16>(&p);    // We have two copies. Return (the lower) one.
     536  return unaligned_load<U8>(&r);
     588  *r = unaligned_load<U16>(&R);
     589  *g = unaligned_load<U16>(&G);
     590  *b = unaligned_load<U16>(&B);
     612  *r = unaligned_load<U16>((uint16_t*)&rg + 0);
     613  *g = unaligned_load<U16>((uint16_t*)&rg + 4);
    [all …]
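The SkRasterPipeline_opts.h hits show the same helper doing double duty: as a bit_cast and as a way to pull lanes out of wider interleaved registers (lines 588-613). A hedged sketch of that pairing, assuming the memcpy-based body; the static_assert guard is an assumption, and the SI macro from the file is replaced with static inline here.

    // Sketch of the unaligned_load / bit_cast pairing; details beyond the hits above are assumed.
    #include <cstring>

    template <typename T, typename P>
    static inline T unaligned_load(const P* p) {   // const void* would work too, but const P* helps ARMv7 codegen.
        T v;
        std::memcpy(&v, p, sizeof(v));
        return v;
    }

    template <typename Dst, typename Src>
    static inline Dst bit_cast(const Src& src) {
        static_assert(sizeof(Dst) == sizeof(Src), "bit_cast needs equal sizes");  // assumed guard
        return unaligned_load<Dst>(&src);          // reinterpret the bytes without aliasing UB
    }

The hits at 612-613 follow the same idea: treat the interleaved rg register as an array of uint16_t and read each deinterleaved half back out through unaligned_load.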
/external/llvm/test/CodeGen/Hexagon/ |
D | select-instr-align.ll |
       6  ; CHECK-LABEL: unaligned_load:
      21  define <16 x i32> @unaligned_load(<16 x i32>* %p, <16 x i32> %a) {
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/ |
D | select-instr-align.ll |
      17  ; CHECK-LABEL: unaligned_load:
      19  define <16 x i32> @unaligned_load(<16 x i32>* %p, <16 x i32> %a) #0 {
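Both copies of this test check that a <16 x i32> load through a pointer the compiler cannot assume is vector-aligned still selects into valid Hexagon instructions. As a rough C++ analogue of the source-level pattern that produces such IR (the struct and function names below are illustrative, not taken from the test):

    #include <cstdint>
    #include <cstring>

    struct V16i32 { int32_t lane[16]; };      // stand-in for the <16 x i32> vector in the IR

    // memcpy tells the compiler the pointer may be unaligned, so it must emit
    // an unaligned-safe load sequence rather than assuming vector alignment.
    V16i32 load_unaligned(const void* p) {
        V16i32 v;
        std::memcpy(&v, p, sizeof(v));
        return v;
    }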