• Home
  • Raw
  • Download

Lines Matching refs:vb01234567c0

115 const float16x8_t vb01234567c0 = vld1q_f16(w); w = (const void*) ((uintptr_t) w + sizeof(float16x8_t));  in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()  local
119 vacc0x01234567 = vfmaq_lane_f16(vacc0x01234567, vb01234567c0, va0, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
120 vacc1x01234567 = vfmaq_lane_f16(vacc1x01234567, vb01234567c0, va1, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
121 vacc2x01234567 = vfmaq_lane_f16(vacc2x01234567, vb01234567c0, va2, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
122 vacc3x01234567 = vfmaq_lane_f16(vacc3x01234567, vb01234567c0, va3, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
123 vacc4x01234567 = vfmaq_lane_f16(vacc4x01234567, vb01234567c0, va4, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
124 vacc5x01234567 = vfmaq_lane_f16(vacc5x01234567, vb01234567c0, va5, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
125 vacc6x01234567 = vfmaq_lane_f16(vacc6x01234567, vb01234567c0, va6, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
126 vacc7x01234567 = vfmaq_lane_f16(vacc7x01234567, vb01234567c0, va7, 0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
145 vacc0x01234567 = vfmaq_f16(vacc0x01234567, va0c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
146 vacc1x01234567 = vfmaq_f16(vacc1x01234567, va1c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
147 vacc2x01234567 = vfmaq_f16(vacc2x01234567, va2c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
148 vacc3x01234567 = vfmaq_f16(vacc3x01234567, va3c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
149 vacc4x01234567 = vfmaq_f16(vacc4x01234567, va4c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
150 vacc5x01234567 = vfmaq_f16(vacc5x01234567, va5c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
151 vacc6x01234567 = vfmaq_f16(vacc6x01234567, va6c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()
152 vacc7x01234567 = vfmaq_f16(vacc7x01234567, va7c0, vb01234567c0); in xnn_f16_gemm_minmax_ukernel_8x16__neonfp16arith_ld64()