// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 8 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>

#include <xnnpack/gemm.h>


void xnn_f16_gemm${"inc" if INC else ""}_minmax_ukernel_${MR}x${NR}__neonfp16arith_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const void* restrict a,
    size_t a_stride,
    const void* restrict w,
    void* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if INC:
      const void*restrict acc,
    const struct xnn_f16_scaleminmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(__fp16) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  $if INC:
    assert(acc != NULL);

  // Set up per-row pointers into A and C. Rows beyond mr are redirected to the
  // previous row, so they compute duplicate results but never access memory out of bounds.
  const __fp16* a0 = (const __fp16*) a;
  __fp16* c0 = (__fp16*) c;
  $for M in range(1, MR):
    const __fp16* a${M} = (const __fp16*) ((uintptr_t) a${M-1} + a_stride);
    __fp16* c${M} = (__fp16*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  do {
    // Initialize accumulators from the partial results in acc (GEMMINC variant)
    // or from the packed weights in w (plain GEMM).
    $if INC:
      $for M in range(MR):
        $for N in range(0, NR, 8):
          float16x8_t vacc${M}x${ABC[N:N+8]} = vld1q_f16(acc); acc = (const void*) ((uintptr_t) acc + sizeof(float16x8_t));
    $else:
      $for N in range(0, NR, 8):
        float16x8_t vacc0x${ABC[N:N+8]} = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));
      $for M in range(1, MR):
        $for N in range(0, NR, 8):
          float16x8_t vacc${M}x${ABC[N:N+8]} = vacc0x${ABC[N:N+8]};

    // Main loop: multiply-accumulate 4 elements of K per iteration,
    // loading 64 bits from each row of A ("ld64").
    size_t k = kc;
    while (k >= 4 * sizeof(__fp16)) {
      $for M in range(MR):
        const float16x4_t va${M} = vld1_f16(a${M}); a${M} += 4;

      $for L in range(4):
        $for N in range(0, NR, 8):
          const float16x8_t vb${ABC[N:N+8]}c${L} = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        #if XNN_ARCH_ARM64
          $for N in range(0, NR, 8):
            $for M in range(MR):
              vacc${M}x${ABC[N:N+8]} = vfmaq_lane_f16(vacc${M}x${ABC[N:N+8]}, vb${ABC[N:N+8]}c${L}, va${M}, ${L});
        #else
          $for M in range(MR):
            const float16x8_t va${M}c${L} = vdupq_lane_f16(va${M}, ${L});

          $for N in range(0, NR, 8):
            $for M in range(MR):
              vacc${M}x${ABC[N:N+8]} = vfmaq_f16(vacc${M}x${ABC[N:N+8]}, va${M}c${L}, vb${ABC[N:N+8]}c${L});
        #endif

      k -= 4 * sizeof(__fp16);
    }
    // Remainder loop: up to 3 leftover elements of K, one at a time.
    if XNN_UNLIKELY(k != 0) {
      do {
        $for M in range(MR):
          const float16x8_t va${M} = vld1q_dup_f16(a${M}); a${M} += 1;

        $for N in range(0, NR, 8):
          const float16x8_t vb${ABC[N:N+8]} = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));

        $for N in range(0, NR, 8):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+8]} = vfmaq_f16(vacc${M}x${ABC[N:N+8]}, va${M}, vb${ABC[N:N+8]});

        k -= sizeof(__fp16);
      } while (k != 0);
    }

    // Scale the accumulators, then clamp them to [min, max].
    const float16x8_t vscale = vld1q_dup_f16((const __fp16*) &params->scale);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = vmulq_f16(vacc${M}x${ABC[N:N+8]}, vscale);

    const float16x8_t vmax = vld1q_dup_f16((const __fp16*) &params->max);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = vminq_f16(vacc${M}x${ABC[N:N+8]}, vmax);

    const float16x8_t vmin = vld1q_dup_f16((const __fp16*) &params->min);
    $for N in range(0, NR, 8):
      $for M in range(MR):
        vacc${M}x${ABC[N:N+8]} = vmaxq_f16(vacc${M}x${ABC[N:N+8]}, vmin);

    // Full tile: store all NR columns of each row, then rewind the A pointers
    // to reuse the same rows for the next block of columns.
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in range(MR):
        vst1q_f16(c${M}, vacc${M}x${ABC[0:8]});
        $for N in range(8, NR, 8):
          vst1q_f16(c${M} + ${N}, vacc${M}x${ABC[N:N+8]});
        c${M} = (__fp16*) ((uintptr_t) c${M} + cn_stride);

      $for M in range(MR):
        a${M} = (const __fp16*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Partial tile: store the remaining nc (< NR) columns with progressively
      // narrower stores, shifting the surviving lanes down after each one.
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 3:
              $for N in range(0, 1 << LOG2N, 8):
                $for M in range(MR):
                  vst1q_f16(c${M}, vacc${M}x${ABC[N:N+8]}); c${M} += 8;

              $for M in range(MR):
                $for N in range(0, 1 << (LOG2N - 1), 8):
                  vacc${M}x${ABC[N:N+8]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+8]};
            $elif LOG2N == 2:
              $for M in range(MR):
                vst1_f16(c${M}, vacc${M}x${ABC[0:4]}); c${M} += 4;

              $for M in range(MR):
                vacc${M}x${ABC[0:4]} = vget_high_f16(vacc${M}x${ABC[0:8]});
            $elif LOG2N == 1:
              $for M in range(MR):
                vst1_lane_u32(__builtin_assume_aligned(c${M}, 1), vreinterpret_u32_f16(vacc${M}x${ABC[0:4]}), 0); c${M} += 2;

              $for M in range(MR):
                vacc${M}x${ABC[0:4]} = vext_f16(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]}, 2);
            $elif LOG2N == 0:
              $for M in range(MR):
                vst1_lane_f16(c${M}, vacc${M}x${ABC[0:4]}, 0);
          }
        $if LOG2N == 3:
          $for M in range(MR):
            float16x4_t vacc${M}x${ABC[0:4]} = vget_low_f16(vacc${M}x${ABC[0:8]});

      nc = 0;
    }
  } while (nc != 0);
}
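// Illustrative expansion (an assumption for clarity, not generated output):
// with MR=4, NR=8, and INC=0, expanding this template yields
// xnn_f16_gemm_minmax_ukernel_4x8__neonfp16arith_ld64, whose AArch64 main loop
// unrolls each group of 4 K-elements into lane-indexed FMAs along these lines:
//
//   const float16x4_t va0 = vld1_f16(a0); a0 += 4;  // 64-bit load from row 0 of A
//   ...                                             // likewise va1..va3
//   const float16x8_t vb01234567c0 = vld1q_f16(w);  // 8 packed weights for K-lane 0
//   vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0);
//   vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0);
//   ...                                             // rows 2-3, then K-lanes 1-3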