// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>


void xnn_f32_f16_vcvt_ukernel__neon_x${BATCH_TILE}(
    size_t n,
    const float* input,
    void* output,
    const union xnn_f32_f16_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint32x4_t vexp_bias = vld1q_dup_u32(&params->neon.exp_bias);
  const float32x4_t vscale_to_inf = vld1q_dup_f32(&params->neon.scale_to_inf);
  const uint32x4_t vexpw_max = vld1q_dup_u32(&params->neon.expw_max);
  const float32x4_t vscale_to_zero = vld1q_dup_f32(&params->neon.scale_to_zero);
  const uint32x4_t vbias_min = vdupq_n_u32(UINT32_C(0x40000000));
  const uint16x8_t vexph_mask = vdupq_n_u16(UINT16_C(0x7C00));   // FP16 exponent bits
  const uint16x8_t vmanth_mask = vdupq_n_u16(UINT16_C(0x0FFF));
  const uint16x8_t vsignh_mask = vdupq_n_u16(UINT16_C(0x8000));  // FP16 sign bit
  const uint16x8_t vnanh = vdupq_n_u16(UINT16_C(0x7E00));        // canonical FP16 NaN

  uint16_t* o = (uint16_t*) output;
  // Main loop: convert ${BATCH_TILE} elements per iteration. The conversion manipulates
  // the binary32 bit pattern directly: the magnitude is rescaled to handle overflow and
  // rounding, the FP16 exponent and mantissa are extracted from the intermediate result,
  // NaN inputs are replaced with a canonical FP16 NaN, and the sign bit is ORed in last.
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    $for N in range(2*SIMD_TILE):
      const float32x4_t vx${N} = vld1q_f32(input); input += 4;

    $for N in range(2*SIMD_TILE):
      const float32x4_t vabsx${N} = vabsq_f32(vx${N});

    $for N in range(2*SIMD_TILE):
      uint32x4_t vbias${N} = vaddq_u32(vreinterpretq_u32_f32(vabsx${N}), vexp_bias);

    $for N in range(2*SIMD_TILE):
      float32x4_t vf${N} = vmulq_f32(vabsx${N}, vscale_to_inf);
    $for N in range(2*SIMD_TILE):
      const uint32x4_t vnanmaskw${N} = vcgtq_u32(vreinterpretq_u32_f32(vabsx${N}), vexpw_max);

    $for N in range(2*SIMD_TILE):
      vbias${N} = vandq_u32(vbias${N}, vexpw_max);
    $for N in range(2*SIMD_TILE):
      vf${N} = vmulq_f32(vf${N}, vscale_to_zero);

    $for N in range(SIMD_TILE):
      const uint16x8_t vnanmaskh${N} = vcombine_u16(vmovn_u32(vnanmaskw${2*N}), vmovn_u32(vnanmaskw${2*N+1}));
    $for N in range(2*SIMD_TILE):
      vbias${N} = vmaxq_u32(vbias${N}, vbias_min);

    $for N in range(2*SIMD_TILE):
      vf${N} = vaddq_f32(vf${N}, vreinterpretq_f32_u32(vbias${N}));

    $for N in range(SIMD_TILE):
      uint16x8_t vexph${N} = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vf${2*N}), 13), vshrn_n_u32(vreinterpretq_u32_f32(vf${2*N+1}), 13));
    $for N in range(SIMD_TILE):
      uint16x8_t vmanth${N} = vcombine_u16(vmovn_u32(vreinterpretq_u32_f32(vf${2*N})), vmovn_u32(vreinterpretq_u32_f32(vf${2*N+1})));
    $for N in range(SIMD_TILE):
      uint16x8_t vsignh${N} = vcombine_u16(vshrn_n_u32(vreinterpretq_u32_f32(vx${2*N}), 16), vshrn_n_u32(vreinterpretq_u32_f32(vx${2*N+1}), 16));

    $for N in range(SIMD_TILE):
      vexph${N} = vandq_u16(vexph${N}, vexph_mask);
    $for N in range(SIMD_TILE):
      vmanth${N} = vandq_u16(vmanth${N}, vmanth_mask);
    $for N in range(SIMD_TILE):
      vsignh${N} = vandq_u16(vsignh${N}, vsignh_mask);

    $for N in range(SIMD_TILE):
      uint16x8_t vh${N} = vaddq_u16(vmanth${N}, vexph${N});

    $for N in range(SIMD_TILE):
      vh${N} = vbslq_u16(vnanmaskh${N}, vnanh, vh${N});

    $for N in range(SIMD_TILE):
      vh${N} = vorrq_u16(vh${N}, vsignh${N});

    $for N in range(SIMD_TILE):
      vst1q_u16(o, vh${N}); o += 8;
  }
  // Tail loop: convert 4 elements per iteration.
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(input); input += 4;

    const float32x4_t vabsx = vabsq_f32(vx);

    uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);

    float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
    const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);

    vbias = vandq_u32(vbias, vexpw_max);
    vf = vmulq_f32(vf, vscale_to_zero);

    const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
    vbias = vmaxq_u32(vbias, vbias_min);

    vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));

    uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
    uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
    uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);

    vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
    vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
    vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));

    uint16x4_t vh = vadd_u16(vmanth, vexph);

    vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);

    vh = vorr_u16(vh, vsignh);

    vst1_u16(o, vh); o += 4;
  }
  // Handle the final 1-3 elements: the full-vector load may read past the end of the
  // input buffer (the kernel is annotated with XNN_OOB_READS), but only the valid
  // lanes are stored.
  if XNN_UNLIKELY(n != 0) {
    assert(n % sizeof(float) == 0);
    assert(n >= 1 * sizeof(float));
    assert(n <= 3 * sizeof(float));
    const float32x4_t vx = vld1q_f32(input);

    const float32x4_t vabsx = vabsq_f32(vx);

    uint32x4_t vbias = vaddq_u32(vreinterpretq_u32_f32(vabsx), vexp_bias);

    float32x4_t vf = vmulq_f32(vabsx, vscale_to_inf);
    const uint32x4_t vnanmaskw = vcgtq_u32(vreinterpretq_u32_f32(vabsx), vexpw_max);

    vbias = vandq_u32(vbias, vexpw_max);
    vf = vmulq_f32(vf, vscale_to_zero);

    const uint16x4_t vnanmaskh = vmovn_u32(vnanmaskw);
    vbias = vmaxq_u32(vbias, vbias_min);

    vf = vaddq_f32(vf, vreinterpretq_f32_u32(vbias));

    uint16x4_t vexph = vshrn_n_u32(vreinterpretq_u32_f32(vf), 13);
    uint16x4_t vmanth = vmovn_u32(vreinterpretq_u32_f32(vf));
    uint16x4_t vsignh = vshrn_n_u32(vreinterpretq_u32_f32(vx), 16);

    vexph = vand_u16(vexph, vget_low_u16(vexph_mask));
    vmanth = vand_u16(vmanth, vget_low_u16(vmanth_mask));
    vsignh = vand_u16(vsignh, vget_low_u16(vsignh_mask));

    uint16x4_t vh = vadd_u16(vmanth, vexph);

    vh = vbsl_u16(vnanmaskh, vget_low_u16(vnanh), vh);

    vh = vorr_u16(vh, vsignh);

    if (n & (2 * sizeof(float))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_u16(vh), 0); o += 2;
      vh = vext_u16(vh, vh, 2);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_u16(o, vh, 0);
    }
  }
}