/external/XNNPACK/src/f32-gemm/gen/ |
D | 8x8s4-minmax-neonfma.c | in xnn_f32_gemm_minmax_ukernel_8x8s4__neonfma():
    111  float32x4_t va7 = vld1q_f32(a7); a7 += 4;   (local)
    124  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c0);
    132  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c0);
    141  va7 = vextq_f32(va7, va7, 1);
    153  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c1);
    161  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c1);
    170  va7 = vextq_f32(va7, va7, 1);
    182  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c2);
    190  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c2);
    199  va7 = vextq_f32(va7, va7, 1);
    [all …]
|
D | 8x8s4-minmax-neon.c | in xnn_f32_gemm_minmax_ukernel_8x8s4__neon():
    111  float32x4_t va7 = vld1q_f32(a7); a7 += 4;   (local)
    124  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
    132  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);
    141  va7 = vextq_f32(va7, va7, 1);
    153  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
    161  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);
    170  va7 = vextq_f32(va7, va7, 1);
    182  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
    190  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);
    199  va7 = vextq_f32(va7, va7, 1);
    [all …]
|
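The f32 entries above all share the same "s4" (shift-by-4) inner-loop shape: four A elements per row are loaded with vld1q_f32, multiplied into two accumulator vectors against a packed block of B, then rotated one lane with vextq_f32 before the next pass. The following is only a minimal sketch of that pattern for a single row; the function name, arguments, and the assumed B packing are invented for illustration, and the generated kernels are fully unrolled across rows 0..7 with min/max clamping and edge handling on top.

    #include <arm_neon.h>
    #include <stddef.h>

    /* Sketch only: one row of an 8x8s4-style inner loop.  The generated
       kernels repeat this for va0..va7 and never use a runtime loop. */
    static void f32_gemm_row_s4_sketch(size_t kc, const float* a7, const float* w,
                                       float32x4_t* vacc7x0123, float32x4_t* vacc7x4567)
    {
      for (size_t k = kc; k >= 4 * sizeof(float); k -= 4 * sizeof(float)) {
        float32x4_t va7 = vld1q_f32(a7); a7 += 4;            /* 4 elements of row 7 of A */
        for (int c = 0; c < 4; c++) {                        /* the four shifted passes of "s4" */
          const float32x4_t vb0123 = vld1q_f32(w); w += 4;   /* packed B, output columns 0..3 */
          const float32x4_t vb4567 = vld1q_f32(w); w += 4;   /* packed B, output columns 4..7 */
          *vacc7x0123 = vfmaq_f32(*vacc7x0123, va7, vb0123); /* vmlaq_f32 in the plain-NEON variant */
          *vacc7x4567 = vfmaq_f32(*vacc7x4567, va7, vb4567);
          va7 = vextq_f32(va7, va7, 1);                      /* rotate A by one lane for the next pass */
        }
      }
    }

The gen-inc (gemminc) and igemm variants below reuse this exact loop body; they differ only in how the accumulators are initialized and how the A pointers are obtained.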
/external/XNNPACK/src/f32-gemm/gen-inc/ |
D | 8x8s4inc-minmax-neon.c | in xnn_f32_gemminc_minmax_ukernel_8x8s4__neon():
    113  float32x4_t va7 = vld1q_f32(a7); a7 += 4;   (local)
    126  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
    134  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);
    143  va7 = vextq_f32(va7, va7, 1);
    155  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
    163  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);
    172  va7 = vextq_f32(va7, va7, 1);
    184  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
    192  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);
    201  va7 = vextq_f32(va7, va7, 1);
    [all …]
|
D | 8x8s4inc-minmax-neonfma.c | in xnn_f32_gemminc_minmax_ukernel_8x8s4__neonfma():
    113  float32x4_t va7 = vld1q_f32(a7); a7 += 4;   (local)
    126  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c0);
    134  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c0);
    143  va7 = vextq_f32(va7, va7, 1);
    155  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c1);
    163  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c1);
    172  va7 = vextq_f32(va7, va7, 1);
    184  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c2);
    192  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c2);
    201  va7 = vextq_f32(va7, va7, 1);
    [all …]
|
/external/XNNPACK/src/f32-igemm/gen/ |
D | 8x8s4-minmax-neonfma.c | in xnn_f32_igemm_minmax_ukernel_8x8s4__neonfma():
    144  float32x4_t va7 = vld1q_f32(a7); a7 += 4;   (local)
    157  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c0);
    165  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c0);
    174  va7 = vextq_f32(va7, va7, 1);
    186  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c1);
    194  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c1);
    203  va7 = vextq_f32(va7, va7, 1);
    215  vacc7x0123 = vfmaq_f32(vacc7x0123, va7, vb0123c2);
    223  vacc7x4567 = vfmaq_f32(vacc7x4567, va7, vb4567c2);
    232  va7 = vextq_f32(va7, va7, 1);
    [all …]
|
D | 8x8s4-minmax-neon.c | in xnn_f32_igemm_minmax_ukernel_8x8s4__neon():
    144  float32x4_t va7 = vld1q_f32(a7); a7 += 4;   (local)
    157  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
    165  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);
    174  va7 = vextq_f32(va7, va7, 1);
    186  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
    194  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);
    203  va7 = vextq_f32(va7, va7, 1);
    215  vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
    223  vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);
    232  va7 = vextq_f32(va7, va7, 1);
    [all …]
|
/external/XNNPACK/src/f16-gemm/gen/ |
D | 8x8-minmax-neonfp16arith-ld64.c | in xnn_f16_gemm_minmax_ukernel_8x8__neonfp16arith_ld64():
    105  const float16x4_t va7 = vld1_f16(a7); a7 += 4;   (local)
    117  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
    126  const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);
    147  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
    156  const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);
    177  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
    186  const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);
    207  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
    216  const float16x8_t va7c3 = vdupq_lane_f16(va7, 3);
    239  const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;   (local)
    [all …]
|
D | 8x16-minmax-neonfp16arith-ld64.c | in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64():
    113  const float16x4_t va7 = vld1_f16(a7); a7 += 4;   (local)
    126  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
    134  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc0, va7, 0);
    143  const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);
    173  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
    181  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc1, va7, 1);
    190  const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);
    220  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
    228  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc2, va7, 2);
    237  const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);
    [all …]
|
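The f16 ld64 kernels load four half-precision A elements per row and consume them by lane index: the listings above show both the direct vfmaq_lane_f16 form and a vdupq_lane_f16 broadcast form (the latter is the fallback when the lane intrinsic is not used). Below is a rough sketch of the direct form for a single row and a single 4-element k-block; the names, the weight layout, and the helper signature are invented, and the generated kernels also handle remainders and min/max clamping.

    #include <arm_neon.h>

    #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
    /* Sketch only: one k-block (4 elements) of one row of an 8x8 f16 ld64 kernel. */
    static float16x8_t f16_gemm_row_ld64_sketch(const float16_t* a7, const float16_t* w,
                                                float16x8_t vacc7x01234567)
    {
      const float16x4_t va7 = vld1_f16(a7);                /* 4 consecutive A elements, row 7 */
      const float16x8_t vb01234567c0 = vld1q_f16(w + 0);   /* B columns 0..7 for k+0 */
      const float16x8_t vb01234567c1 = vld1q_f16(w + 8);   /* ... for k+1 */
      const float16x8_t vb01234567c2 = vld1q_f16(w + 16);  /* ... for k+2 */
      const float16x8_t vb01234567c3 = vld1q_f16(w + 24);  /* ... for k+3 */
      vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
      vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
      vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
      vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
      return vacc7x01234567;
    }
    #endif

The 8x16 variants listed above extend the same pattern with a second accumulator (vacc7x89ABCDEF) fed from a second B vector per k step; the igemm and gemminc entries that follow reuse the identical inner loop.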
/external/XNNPACK/src/f16-igemm/gen/ |
D | 8x8-minmax-neonfp16arith-ld64.c | in xnn_f16_igemm_minmax_ukernel_8x8__neonfp16arith_ld64():
    137  const float16x4_t va7 = vld1_f16(a7); a7 += 4;   (local)
    149  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
    158  const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);
    179  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
    188  const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);
    209  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
    218  const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);
    239  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
    248  const float16x8_t va7c3 = vdupq_lane_f16(va7, 3);
    269  const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;   (local)
    [all …]
|
D | 8x16-minmax-neonfp16arith-ld64.c | in xnn_f16_igemm_minmax_ukernel_8x16__neonfp16arith_ld64():
    145  const float16x4_t va7 = vld1_f16(a7); a7 += 4;   (local)
    158  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
    166  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc0, va7, 0);
    175  const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);
    205  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
    213  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc1, va7, 1);
    222  const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);
    252  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
    260  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc2, va7, 2);
    269  const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);
    [all …]
|
/external/XNNPACK/src/f16-gemm/gen-inc/ |
D | 8x8inc-minmax-neonfp16arith-ld64.c | in xnn_f16_gemminc_minmax_ukernel_8x8__neonfp16arith_ld64():
    107  const float16x4_t va7 = vld1_f16(a7); a7 += 4;   (local)
    119  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
    128  const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);
    149  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
    158  const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);
    179  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
    188  const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);
    209  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c3, va7, 3);
    218  const float16x8_t va7c3 = vdupq_lane_f16(va7, 3);
    241  const float16x8_t va7 = vld1q_dup_f16(a7); a7 += 1;   (local)
    [all …]
|
D | 8x16inc-minmax-neonfp16arith-ld64.c | in xnn_f16_gemminc_minmax_ukernel_8x16__neonfp16arith_ld64():
    115  const float16x4_t va7 = vld1_f16(a7); a7 += 4;   (local)
    128  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0);
    136  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc0, va7, 0);
    145  const float16x8_t va7c0 = vdupq_lane_f16(va7, 0);
    175  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c1, va7, 1);
    183  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc1, va7, 1);
    192  const float16x8_t va7c1 = vdupq_lane_f16(va7, 1);
    222  vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c2, va7, 2);
    230  vacc7x89ABCDEF = vfmaq_lane_f16(vacc7x89ABCDEF, vb89ABCDEFc2, va7, 2);
    239  const float16x8_t va7c2 = vdupq_lane_f16(va7, 2);
    [all …]
|
/external/XNNPACK/src/f32-vbinary/gen/ |
D | vrdivc-scalar-x8.c | in xnn_f32_vrdivc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = vb / va7;
|
D | vaddc-scalar-x8.c | in xnn_f32_vaddc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = va7 + vb;
|
D | vrsubc-scalar-x8.c | in xnn_f32_vrsubc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = vb - va7;
|
D | vsubc-scalar-x8.c | in xnn_f32_vsubc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = va7 - vb;
|
D | vmulc-scalar-x8.c | in xnn_f32_vmulc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = va7 * vb;
|
D | vdivc-scalar-x8.c | in xnn_f32_vdivc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = va7 / vb;
|
D | vminc-scalar-x8.c | in xnn_f32_vminc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = math_min_f32(va7, vb);
|
D | vminc-wasm-x8.c | in xnn_f32_vminc_ukernel__wasm_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = __builtin_wasm_min_f32(va7, vb);
|
D | vmaxc-wasm-x8.c | in xnn_f32_vmaxc_ukernel__wasm_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = __builtin_wasm_max_f32(va7, vb);
|
D | vmaxc-scalar-x8.c | in xnn_f32_vmaxc_ukernel__scalar_x8():
    40  const float va7 = a[7];   (local)
    50  float vy7 = math_max_f32(va7, vb);
|
D | vadd-scalar-x8.c | in xnn_f32_vadd_ukernel__scalar_x8():
    39  const float va7 = a[7];   (local)
    59  float vy7 = va7 + vb7;
|
D | vdiv-scalar-x8.c | in xnn_f32_vdiv_ukernel__scalar_x8():
    39  const float va7 = a[7];   (local)
    59  float vy7 = va7 / vb7;
|
D | vmul-scalar-x8.c | in xnn_f32_vmul_ukernel__scalar_x8():
    39  const float va7 = a[7];   (local)
    59  float vy7 = va7 * vb7;
|
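The vbinary entries are much simpler: each scalar x8 kernel unrolls the element-wise loop by eight, naming the loaded elements va0..va7 and the results vy0..vy7, and the "c" variants (vaddc, vrdivc, and so on) broadcast a single second operand read once. A compact sketch of the vaddc shape is below; the function name is invented, and the real ukernels additionally take an XNNPACK params argument and follow the library's calling conventions.

    #include <stddef.h>

    /* Sketch only: the scalar x8 unrolling behind the vaddc-style listings.
       n is a byte count here, matching how these ukernels size their input. */
    static void f32_vaddc_scalar_x8_sketch(size_t n, const float* a, const float* b, float* y)
    {
      const float vb = *b;                             /* the broadcast ("c") operand, read once */
      for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
        const float va0 = a[0];  const float va1 = a[1];
        const float va2 = a[2];  const float va3 = a[3];
        const float va4 = a[4];  const float va5 = a[5];
        const float va6 = a[6];  const float va7 = a[7];   /* the reference this index tracks */
        a += 8;
        float vy0 = va0 + vb;  float vy1 = va1 + vb;
        float vy2 = va2 + vb;  float vy3 = va3 + vb;
        float vy4 = va4 + vb;  float vy5 = va5 + vb;
        float vy6 = va6 + vb;  float vy7 = va7 + vb;
        y[0] = vy0;  y[1] = vy1;  y[2] = vy2;  y[3] = vy3;
        y[4] = vy4;  y[5] = vy5;  y[6] = vy6;  y[7] = vy7;
        y += 8;
      }
      for (; n >= sizeof(float); n -= sizeof(float)) {   /* scalar tail for the remaining elements */
        *y++ = *a++ + vb;
      }
    }

The non-"c" kernels (vadd, vdiv, vmul above) differ only in loading a second unrolled set vb0..vb7 from the b pointer instead of broadcasting one value.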