/external/XNNPACK/src/x32-zip/
D | x2-neon.c |
    26  uint32x4x2_t vxy;  in xnn_x32_zip_x2_ukernel__neon() local
    27  vxy.val[0] = vld1q_u32(x); x += 4;  in xnn_x32_zip_x2_ukernel__neon()
    28  vxy.val[1] = vld1q_u32(y); y += 4;  in xnn_x32_zip_x2_ukernel__neon()
    29  vst2q_u32(o, vxy); o += 8;  in xnn_x32_zip_x2_ukernel__neon()
    34  uint32x2x2_t vxy;  in xnn_x32_zip_x2_ukernel__neon() local
    35  vxy.val[0] = vld1_u32(x); x += 2;  in xnn_x32_zip_x2_ukernel__neon()
    36  vxy.val[1] = vld1_u32(y); y += 2;  in xnn_x32_zip_x2_ukernel__neon()
    37  vst2_u32(o, vxy); o += 4;  in xnn_x32_zip_x2_ukernel__neon()
    40  uint32x2_t vxy = vld1_dup_u32(x);  in xnn_x32_zip_x2_ukernel__neon() local
    41  vxy = vld1_lane_u32(y, vxy, 1);  in xnn_x32_zip_x2_ukernel__neon()
    [all …]
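The main loop here leans on NEON's structured store: vst2q_u32 writes the two registers of a uint32x4x2_t element-interleaved, so zipping two streams is a pair of loads plus one store. A minimal sketch of the full-width step, assuming an illustrative standalone harness (main, buffers, and sizes are not part of XNNPACK); compile for an ARM target, e.g. -march=armv8-a:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      const uint32_t x[4] = {0, 1, 2, 3};
      const uint32_t y[4] = {10, 11, 12, 13};
      uint32_t o[8];

      uint32x4x2_t vxy;
      vxy.val[0] = vld1q_u32(x);  /* x0 x1 x2 x3 */
      vxy.val[1] = vld1q_u32(y);  /* y0 y1 y2 y3 */
      vst2q_u32(o, vxy);          /* stores x0 y0 x1 y1 x2 y2 x3 y3 */

      for (int i = 0; i < 8; i++) printf("%u ", o[i]);  /* 0 10 1 11 2 12 3 13 */
      printf("\n");
      return 0;
    }

The tail at lines 40-41 handles a single leftover pair with the vld1_dup/vld1_lane idiom shown under x3-neon.c below.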
D | xm-neon.c |
    46  const uint32x4x2_t vxy = vzipq_u32(vx, vy);  in xnn_x32_zip_xm_ukernel__neon() local
    49  vst1_u32(output, vget_low_u32(vxy.val[0]));  in xnn_x32_zip_xm_ukernel__neon()
    53  vst1_u32(output, vget_high_u32(vxy.val[0]));  in xnn_x32_zip_xm_ukernel__neon()
    57  vst1_u32(output, vget_low_u32(vxy.val[1]));  in xnn_x32_zip_xm_ukernel__neon()
    61  vst1_u32(output, vget_high_u32(vxy.val[1]));  in xnn_x32_zip_xm_ukernel__neon()
    74  const uint32x2x2_t vxy = vzip_u32(vx, vy);  in xnn_x32_zip_xm_ukernel__neon() local
    77  vst1_u32(output, vxy.val[0]);  in xnn_x32_zip_xm_ukernel__neon()
    81  vst1_u32(output, vxy.val[1]);  in xnn_x32_zip_xm_ukernel__neon()
    88  const uint32x2_t vxy = vld1_lane_u32(y, vx, 1);  in xnn_x32_zip_xm_ukernel__neon() local
    91  vst1_u32(output, vxy);  in xnn_x32_zip_xm_ukernel__neon()
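Unlike the x2 kernel, the xm variant zips in registers with vzipq_u32 and then scatters each 64-bit half to a separate destination (the real kernel advances output by a caller-supplied stride between the stores). A sketch of the lane flow, with illustrative row buffers standing in for the strided output:

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      const uint32_t x[4] = {0, 1, 2, 3};
      const uint32_t y[4] = {10, 11, 12, 13};
      uint32_t row0[2], row1[2], row2[2], row3[2];

      const uint32x4_t vx = vld1q_u32(x);
      const uint32x4_t vy = vld1q_u32(y);
      const uint32x4x2_t vxy = vzipq_u32(vx, vy);  /* val[0]=x0 y0 x1 y1, val[1]=x2 y2 x3 y3 */

      vst1_u32(row0, vget_low_u32(vxy.val[0]));   /* x0 y0 */
      vst1_u32(row1, vget_high_u32(vxy.val[0]));  /* x1 y1 */
      vst1_u32(row2, vget_low_u32(vxy.val[1]));   /* x2 y2 */
      vst1_u32(row3, vget_high_u32(vxy.val[1]));  /* x3 y3 */

      printf("%u %u | %u %u\n", row0[0], row0[1], row3[0], row3[1]);  /* 0 10 | 3 13 */
      return 0;
    }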
D | x3-sse2.c |
    38  const __m128 vxy = _mm_shuffle_ps(vx, vy, _MM_SHUFFLE(2, 0, 2, 0));  in xnn_x32_zip_x3_ukernel__sse2() local
    45  const __m128 vxyz0 = _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(2, 0, 2, 0));  in xnn_x32_zip_x3_ukernel__sse2()
    47  const __m128 vxyz1 = _mm_shuffle_ps(vyz, vxy, _MM_SHUFFLE(3, 1, 2, 0));  in xnn_x32_zip_x3_ukernel__sse2()
    70  const __m128 vxy = _mm_unpacklo_ps(vx, vy);  in xnn_x32_zip_x3_ukernel__sse2() local
    76  _mm_storeu_ps(o, _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(3, 0, 1, 0)));  in xnn_x32_zip_x3_ukernel__sse2()
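This is a 3-way (RGB-style) interleave built entirely from _mm_shuffle_ps. The vyz/vzx operands and the third output shuffle do not appear among the matches above, so their selectors in this sketch are reconstructed and should be checked against the source; the kernel also shuffles uint32 data through float registers, while plain floats are used here for brevity. Harness is illustrative:

    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
      const float x[4] = {0, 1, 2, 3}, y[4] = {10, 11, 12, 13}, z[4] = {20, 21, 22, 23};
      float o[12];

      const __m128 vx = _mm_loadu_ps(x), vy = _mm_loadu_ps(y), vz = _mm_loadu_ps(z);
      const __m128 vxy = _mm_shuffle_ps(vx, vy, _MM_SHUFFLE(2, 0, 2, 0));  /* x0 x2 y0 y2 */
      const __m128 vyz = _mm_shuffle_ps(vy, vz, _MM_SHUFFLE(3, 1, 3, 1));  /* y1 y3 z1 z3 */
      const __m128 vzx = _mm_shuffle_ps(vz, vx, _MM_SHUFFLE(3, 1, 2, 0));  /* z0 z2 x1 x3 */
      _mm_storeu_ps(o + 0, _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(2, 0, 2, 0)));  /* x0 y0 z0 x1 */
      _mm_storeu_ps(o + 4, _mm_shuffle_ps(vyz, vxy, _MM_SHUFFLE(3, 1, 2, 0)));  /* y1 z1 x2 y2 */
      _mm_storeu_ps(o + 8, _mm_shuffle_ps(vzx, vyz, _MM_SHUFFLE(3, 1, 3, 1)));  /* z2 x3 y3 z3 */

      for (int i = 0; i < 12; i++) printf("%g ", o[i]);  /* 0 10 20 1 11 21 2 12 22 3 13 23 */
      printf("\n");
      return 0;
    }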
D | x3-neon.c |
    43  uint32x2_t vxy = vld1_dup_u32(x);  in xnn_x32_zip_x3_ukernel__neon() local
    45  vxy = vld1_lane_u32(y, vxy, 1);  in xnn_x32_zip_x3_ukernel__neon()
    46  vst1_u32(o, vxy); o += 2;  in xnn_x32_zip_x3_ukernel__neon()
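This is the scalar-pair tail idiom: vld1_dup_u32 broadcasts *x into both lanes, vld1_lane_u32 overwrites lane 1 with *y, and a single 64-bit store writes the zipped pair. A self-contained sketch (harness illustrative, ARM target assumed):

    #include <arm_neon.h>
    #include <stdio.h>

    int main(void) {
      const uint32_t x = 7, y = 42;
      uint32_t o[2];

      uint32x2_t vxy = vld1_dup_u32(&x);  /* x0 x0 */
      vxy = vld1_lane_u32(&y, vxy, 1);    /* x0 y0 */
      vst1_u32(o, vxy);

      printf("%u %u\n", o[0], o[1]);  /* 7 42 */
      return 0;
    }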
D | x2-wasmsimd.c |
    43  const v128_t vxy = wasm_f64x2_make(vx, vy);  in xnn_x32_zip_x2_ukernel__wasmsimd() local
    44  wasm_v128_store(o, wasm_v32x4_shuffle(vxy, vxy, 0, 2, 1, 3));  in xnn_x32_zip_x2_ukernel__wasmsimd()
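wasm_f64x2_make packs the two 64-bit loads into one v128 as [x0 x1 y0 y1]; the 4x32 shuffle then reorders to [x0 y0 x1 y1]. A sketch using memcpy for the 64-bit loads where the kernel dereferences a double pointer directly; compile with a wasm toolchain, e.g. clang --target=wasm32 -msimd128 (harness illustrative):

    #include <wasm_simd128.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      const uint32_t x[2] = {0, 1}, y[2] = {10, 11};
      uint32_t o[4];

      double vx, vy;  /* each carries two u32 values in one 64-bit lane */
      memcpy(&vx, x, sizeof(vx));
      memcpy(&vy, y, sizeof(vy));

      const v128_t vxy = wasm_f64x2_make(vx, vy);                    /* x0 x1 y0 y1 */
      wasm_v128_store(o, wasm_v32x4_shuffle(vxy, vxy, 0, 2, 1, 3));  /* x0 y0 x1 y1 */

      printf("%u %u %u %u\n", o[0], o[1], o[2], o[3]);  /* 0 10 1 11 */
      return 0;
    }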
D | x3-wasmsimd.c |
    38  const v128_t vxy = wasm_v32x4_shuffle(vx, vy, 0, 2, 4, 6);  in xnn_x32_zip_x3_ukernel__wasmsimd() local
    45  const v128_t vxyz0 = wasm_v32x4_shuffle(vxy, vzx, 0, 2, 4, 6);  in xnn_x32_zip_x3_ukernel__wasmsimd()
    47  const v128_t vxyz1 = wasm_v32x4_shuffle(vyz, vxy, 0, 2, 5, 7);  in xnn_x32_zip_x3_ukernel__wasmsimd()
D | x4-wasmsimd.c |
    65  const v128_t vxy = wasm_f64x2_make(vx, vy);  in xnn_x32_zip_x4_ukernel__wasmsimd() local
    68  const v128_t vxyzw_lo = wasm_v32x4_shuffle(vxy, vzw, 0, 2, 4, 6);  in xnn_x32_zip_x4_ukernel__wasmsimd()
    69  const v128_t vxyzw_hi = wasm_v32x4_shuffle(vxy, vzw, 1, 3, 5, 7);  in xnn_x32_zip_x4_ukernel__wasmsimd()
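With four streams, the same f64x2 packing gives [x0 x1 y0 y1] and [z0 z1 w0 w1]; selecting the even lanes (0, 2, 4, 6) and the odd lanes (1, 3, 5, 7) across the pair yields the two interleaved quads. Sketch under the same wasm-toolchain assumptions as above:

    #include <wasm_simd128.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
      const uint32_t x[2] = {0, 1}, y[2] = {10, 11}, z[2] = {20, 21}, w[2] = {30, 31};
      uint32_t o[8];

      double vx, vy, vz, vw;
      memcpy(&vx, x, sizeof(vx)); memcpy(&vy, y, sizeof(vy));
      memcpy(&vz, z, sizeof(vz)); memcpy(&vw, w, sizeof(vw));

      const v128_t vxy = wasm_f64x2_make(vx, vy);  /* x0 x1 y0 y1 */
      const v128_t vzw = wasm_f64x2_make(vz, vw);  /* z0 z1 w0 w1 */
      wasm_v128_store(o + 0, wasm_v32x4_shuffle(vxy, vzw, 0, 2, 4, 6));  /* x0 y0 z0 w0 */
      wasm_v128_store(o + 4, wasm_v32x4_shuffle(vxy, vzw, 1, 3, 5, 7));  /* x1 y1 z1 w1 */

      for (int i = 0; i < 8; i++) printf("%u ", o[i]);  /* 0 10 20 30 1 11 21 31 */
      printf("\n");
      return 0;
    }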
D | x4-sse2.c |
    65  const __m128i vxy = _mm_unpacklo_epi32(vx, vy);  in xnn_x32_zip_x4_ukernel__sse2() local
    68  const __m128i vxyzw_lo = _mm_unpacklo_epi64(vxy, vzw);  in xnn_x32_zip_x4_ukernel__sse2()
    69  const __m128i vxyzw_hi = _mm_unpackhi_epi64(vxy, vzw);  in xnn_x32_zip_x4_ukernel__sse2()
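The SSE2 counterpart does the 4-stream zip as a half transpose: unpacklo_epi32 pairs x with y and z with w, then unpacklo/unpackhi_epi64 merge the pairs. The sketch below feeds two elements per stream through that lane flow; the actual kernel loads four per stream and adds an unpackhi_epi32 stage, which is an assumption here, not shown in the matches:

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const uint32_t x[2] = {0, 1}, y[2] = {10, 11}, z[2] = {20, 21}, w[2] = {30, 31};
      uint32_t o[8];

      const __m128i vx = _mm_loadl_epi64((const __m128i*) x);  /* x0 x1 -- -- */
      const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
      const __m128i vz = _mm_loadl_epi64((const __m128i*) z);
      const __m128i vw = _mm_loadl_epi64((const __m128i*) w);

      const __m128i vxy = _mm_unpacklo_epi32(vx, vy);  /* x0 y0 x1 y1 */
      const __m128i vzw = _mm_unpacklo_epi32(vz, vw);  /* z0 w0 z1 w1 */
      _mm_storeu_si128((__m128i*) (o + 0), _mm_unpacklo_epi64(vxy, vzw));  /* x0 y0 z0 w0 */
      _mm_storeu_si128((__m128i*) (o + 4), _mm_unpackhi_epi64(vxy, vzw));  /* x1 y1 z1 w1 */

      for (int i = 0; i < 8; i++) printf("%u ", o[i]);  /* 0 10 20 30 1 11 21 31 */
      printf("\n");
      return 0;
    }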
D | xm-wasmsimd.c |
    86  const v128_t vxy = wasm_f64x2_make(vx, vy);  in xnn_x32_zip_xm_ukernel__wasmsimd() local
    89  const v128_t vxyzw_lo = wasm_v32x4_shuffle(vxy, vzw, 0, 2, 4, 6);  in xnn_x32_zip_xm_ukernel__wasmsimd()
    90  const v128_t vxyzw_hi = wasm_v32x4_shuffle(vxy, vzw, 1, 3, 5, 7);  in xnn_x32_zip_xm_ukernel__wasmsimd()
D | x2-sse2.c |
    43  const __m128i vxy = _mm_unpacklo_epi32(vx, vy);  in xnn_x32_zip_x2_ukernel__sse2() local
    44  _mm_storeu_si128((__m128i*) o, vxy);  in xnn_x32_zip_x2_ukernel__sse2()
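For two streams a single unpack suffices: unpacklo_epi32(vx, vy) is [x0 y0 x1 y1]. Only the unpacklo line appears above, so the unpackhi half in this sketch is a reconstruction of how the kernel presumably covers the upper elements (harness illustrative):

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      const uint32_t x[4] = {0, 1, 2, 3}, y[4] = {10, 11, 12, 13};
      uint32_t o[8];

      const __m128i vx = _mm_loadu_si128((const __m128i*) x);
      const __m128i vy = _mm_loadu_si128((const __m128i*) y);
      _mm_storeu_si128((__m128i*) (o + 0), _mm_unpacklo_epi32(vx, vy));  /* x0 y0 x1 y1 */
      _mm_storeu_si128((__m128i*) (o + 4), _mm_unpackhi_epi32(vx, vy));  /* x2 y2 x3 y3 */

      for (int i = 0; i < 8; i++) printf("%u ", o[i]);  /* 0 10 1 11 2 12 3 13 */
      printf("\n");
      return 0;
    }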
D | xm-sse2.c |
    85  const __m128i vxy = _mm_unpacklo_epi32(vx, vy);  in xnn_x32_zip_xm_ukernel__sse2() local
    88  const __m128i vxyzw_lo = _mm_unpacklo_epi64(vxy, vzw);  in xnn_x32_zip_xm_ukernel__sse2()
    89  const __m128i vxyzw_hi = _mm_unpackhi_epi64(vxy, vzw);  in xnn_x32_zip_xm_ukernel__sse2()
/external/XNNPACK/src/x8-zip/
D | x2-neon.c |
    25  uint8x8x2_t vxy;  in xnn_x8_zip_x2_ukernel__neon() local
    26  vxy.val[0] = vld1_u8(x); x += 8;  in xnn_x8_zip_x2_ukernel__neon()
    27  vxy.val[1] = vld1_u8(y); y += 8;  in xnn_x8_zip_x2_ukernel__neon()
    28  vst2_u8(o, vxy); o += 16;  in xnn_x8_zip_x2_ukernel__neon()
    33  uint8x8x2_t vxy;  in xnn_x8_zip_x2_ukernel__neon() local
    34  vxy.val[0] = vld1_u8((const uint8_t*) ((uintptr_t) x + address_increment));  in xnn_x8_zip_x2_ukernel__neon()
    35  vxy.val[1] = vld1_u8((const uint8_t*) ((uintptr_t) y + address_increment));  in xnn_x8_zip_x2_ukernel__neon()
    36  vst2_u8((uint8_t*) ((uintptr_t) o + address_increment * 2), vxy);  in xnn_x8_zip_x2_ukernel__neon()
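The second group of matches is the overlapped-tail trick: when fewer than 8 bytes remain, address_increment = n - 8 wraps to a negative offset, so the final full-width load/store is shifted back to overlap bytes already processed, avoiding a scalar loop. A self-contained sketch with an 11-byte input (buffers and n are illustrative, ARM target assumed):

    #include <arm_neon.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t x[11], y[11], o[22];
      for (int i = 0; i < 11; i++) { x[i] = (uint8_t) i; y[i] = (uint8_t) (100 + i); }

      size_t n = 11;
      const uint8_t* px = x; const uint8_t* py = y; uint8_t* po = o;

      /* full 8-byte groups */
      for (; n >= 8; n -= 8) {
        uint8x8x2_t vxy;
        vxy.val[0] = vld1_u8(px); px += 8;
        vxy.val[1] = vld1_u8(py); py += 8;
        vst2_u8(po, vxy); po += 16;
      }
      /* tail: back up so one more full-width load/store overlaps prior bytes */
      if (n != 0) {
        const size_t address_increment = n - 8;  /* wraps: acts as a negative offset */
        uint8x8x2_t vxy;
        vxy.val[0] = vld1_u8((const uint8_t*) ((uintptr_t) px + address_increment));
        vxy.val[1] = vld1_u8((const uint8_t*) ((uintptr_t) py + address_increment));
        vst2_u8((uint8_t*) ((uintptr_t) po + address_increment * 2), vxy);
      }

      for (int i = 0; i < 22; i++) printf("%d ", o[i]);  /* 0 100 1 101 ... 10 110 */
      printf("\n");
      return 0;
    }

The overlap is safe because the re-read input bytes produce exactly the output bytes already written, so rewriting them is a no-op.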
D | xm-neon.c |
    42  const uint8x8x2_t vxy = vzip_u8(vx, vy);  in xnn_x8_zip_xm_ukernel__neon() local
    44  …const uint16x4x2_t vxyzw_lo = vzip_u16(vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.va…  in xnn_x8_zip_xm_ukernel__neon()
    45  …const uint16x4x2_t vxyzw_hi = vzip_u16(vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.va…  in xnn_x8_zip_xm_ukernel__neon()
    85  const uint8x8x2_t vxy = vzip_u8(vreinterpret_u8_u64(vx), vreinterpret_u8_u64(vy));  in xnn_x8_zip_xm_ukernel__neon() local
    87  …const uint16x4x2_t vxyzw_lo = vzip_u16(vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.va…  in xnn_x8_zip_xm_ukernel__neon()
    88  …const uint16x4x2_t vxyzw_hi = vzip_u16(vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.va…  in xnn_x8_zip_xm_ukernel__neon()
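The truncated second operands above are presumably vzw.val[0] and vzw.val[1]; the sketch below assumes that completion. Two zip levels interleave four byte streams: vzip_u8 yields x0 y0 x1 y1 ..., and vzip_u16 on the reinterpreted halves merges in z and w, giving x0 y0 z0 w0 x1 y1 z1 w1 ... (harness illustrative, ARM target assumed):

    #include <arm_neon.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t x[8], y[8], z[8], w[8], o[32];
      for (int i = 0; i < 8; i++) {
        x[i] = (uint8_t) i; y[i] = (uint8_t) (10 + i);
        z[i] = (uint8_t) (20 + i); w[i] = (uint8_t) (30 + i);
      }

      const uint8x8_t vx = vld1_u8(x), vy = vld1_u8(y), vz = vld1_u8(z), vw = vld1_u8(w);
      /* level 1: byte zip -> x0 y0 x1 y1 ... */
      const uint8x8x2_t vxy = vzip_u8(vx, vy);
      const uint8x8x2_t vzw = vzip_u8(vz, vw);
      /* level 2: 16-bit zip of the byte pairs -> x0 y0 z0 w0 x1 y1 z1 w1 ... */
      const uint16x4x2_t vxyzw_lo = vzip_u16(vreinterpret_u16_u8(vxy.val[0]), vreinterpret_u16_u8(vzw.val[0]));
      const uint16x4x2_t vxyzw_hi = vzip_u16(vreinterpret_u16_u8(vxy.val[1]), vreinterpret_u16_u8(vzw.val[1]));

      vst1_u8(o +  0, vreinterpret_u8_u16(vxyzw_lo.val[0]));
      vst1_u8(o +  8, vreinterpret_u8_u16(vxyzw_lo.val[1]));
      vst1_u8(o + 16, vreinterpret_u8_u16(vxyzw_hi.val[0]));
      vst1_u8(o + 24, vreinterpret_u8_u16(vxyzw_hi.val[1]));

      for (int i = 0; i < 8; i++) printf("%d ", o[i]);  /* 0 10 20 30 1 11 21 31 */
      printf("\n");
      return 0;
    }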
D | xm-sse2.c |
    113  const __m128i vxy = _mm_unpacklo_epi8(vx, vy);  in xnn_x8_zip_xm_ukernel__sse2() local
    115  __m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);  in xnn_x8_zip_xm_ukernel__sse2()
    116  __m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);  in xnn_x8_zip_xm_ukernel__sse2()
    156  const __m128i vxy = _mm_unpacklo_epi8(vx, vy);  in xnn_x8_zip_xm_ukernel__sse2() local
    158  __m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);  in xnn_x8_zip_xm_ukernel__sse2()
    159  __m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);  in xnn_x8_zip_xm_ukernel__sse2()
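The SSE2 version of the same two-level idea: unpacklo_epi8 builds byte pairs, then unpacklo/unpackhi_epi16 merge pairs into quads. Sketch with illustrative 8-byte streams:

    #include <emmintrin.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      uint8_t x[8], y[8], z[8], w[8], o[32];
      for (int i = 0; i < 8; i++) {
        x[i] = (uint8_t) i; y[i] = (uint8_t) (10 + i);
        z[i] = (uint8_t) (20 + i); w[i] = (uint8_t) (30 + i);
      }

      const __m128i vx = _mm_loadl_epi64((const __m128i*) x);
      const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
      const __m128i vz = _mm_loadl_epi64((const __m128i*) z);
      const __m128i vw = _mm_loadl_epi64((const __m128i*) w);

      const __m128i vxy = _mm_unpacklo_epi8(vx, vy);        /* x0 y0 x1 y1 ... x7 y7 */
      const __m128i vzw = _mm_unpacklo_epi8(vz, vw);        /* z0 w0 z1 w1 ... z7 w7 */
      const __m128i vxyzw0 = _mm_unpacklo_epi16(vxy, vzw);  /* x0 y0 z0 w0 ... x3 y3 z3 w3 */
      const __m128i vxyzw1 = _mm_unpackhi_epi16(vxy, vzw);  /* x4 y4 z4 w4 ... x7 y7 z7 w7 */
      _mm_storeu_si128((__m128i*) (o + 0), vxyzw0);
      _mm_storeu_si128((__m128i*) (o + 16), vxyzw1);

      for (int i = 0; i < 8; i++) printf("%d ", o[i]);  /* 0 10 20 30 1 11 21 31 */
      printf("\n");
      return 0;
    }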
/external/subsampling-scale-image-view/library/src/main/java/com/davemorrissey/labs/subscaleview/
D | SubsamplingScaleImageView.java |
    2105  public final PointF viewToSourceCoord(PointF vxy) {  in viewToSourceCoord() argument
    2106  return viewToSourceCoord(vxy.x, vxy.y, new PointF());  in viewToSourceCoord()
    2125  public final PointF viewToSourceCoord(PointF vxy, PointF sTarget) {  in viewToSourceCoord() argument
    2126  return viewToSourceCoord(vxy.x, vxy.y, sTarget);  in viewToSourceCoord()
/external/XNNPACK/src/amalgam/
D | sse2.c |
    12941  const __m128i vxy = _mm_unpacklo_epi32(vx, vy);  in xnn_x32_zip_x2_ukernel__sse2() local
    12942  _mm_storeu_si128((__m128i*) o, vxy);  in xnn_x32_zip_x2_ukernel__sse2()
    12979  const __m128 vxy = _mm_shuffle_ps(vx, vy, _MM_SHUFFLE(2, 0, 2, 0));  in xnn_x32_zip_x3_ukernel__sse2() local
    12986  const __m128 vxyz0 = _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(2, 0, 2, 0));  in xnn_x32_zip_x3_ukernel__sse2()
    12988  const __m128 vxyz1 = _mm_shuffle_ps(vyz, vxy, _MM_SHUFFLE(3, 1, 2, 0));  in xnn_x32_zip_x3_ukernel__sse2()
    13011  const __m128 vxy = _mm_unpacklo_ps(vx, vy);  in xnn_x32_zip_x3_ukernel__sse2() local
    13017  _mm_storeu_ps(o, _mm_shuffle_ps(vxy, vzx, _MM_SHUFFLE(3, 0, 1, 0)));  in xnn_x32_zip_x3_ukernel__sse2()
    13084  const __m128i vxy = _mm_unpacklo_epi32(vx, vy);  in xnn_x32_zip_x4_ukernel__sse2() local
    13087  const __m128i vxyzw_lo = _mm_unpacklo_epi64(vxy, vzw);  in xnn_x32_zip_x4_ukernel__sse2()
    13088  const __m128i vxyzw_hi = _mm_unpackhi_epi64(vxy, vzw);  in xnn_x32_zip_x4_ukernel__sse2()
    [all …]