/external/mesa3d/src/mesa/tnl_dd/ |
D | t_dd_triemit.h |
      10  #define COPY_DWORDS( j, vb, vertsize, v ) \    argument
      14  : "=%c" (j), "=D" (vb), "=S" (__tmp) \
      16  "D" ((long)vb), \
      20  #define COPY_DWORDS( j, vb, vertsize, v ) \    argument
      23  vb[j] = ((GLuint *)v)[j]; \
      24  vb += vertsize; \
      38  GLuint *vb = (GLuint *)ALLOC_VERTS( 4, vertsize);    in TAG()  local
      49  COPY_DWORDS( j, vb, vertsize, v0 );    in TAG()
      50  COPY_DWORDS( j, vb, vertsize, v1 );    in TAG()
      51  COPY_DWORDS( j, vb, vertsize, v2 );    in TAG()
      [all …]
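
These hits are the two definitions of Mesa's COPY_DWORDS vertex-copy helper: an x86 inline-assembly variant (lines 10-16, using the "c"/"D"/"S" register constraints for a rep-movsl-style copy) and the portable C fallback (lines 20-24). A minimal sketch of the portable pattern and a hypothetical emitter built on it follows; every name other than COPY_DWORDS is illustrative, not Mesa's.

    typedef unsigned int GLuint;

    /* Portable fallback (cf. lines 20-24): copy 'vertsize' 32-bit words of
     * vertex 'v' into the hardware vertex buffer 'vb', then advance 'vb'
     * past the vertex just written. */
    #define COPY_DWORDS(j, vb, vertsize, v)        \
      do {                                         \
        for ((j) = 0; (j) < (vertsize); (j)++)     \
          (vb)[(j)] = ((const GLuint *)(v))[(j)];  \
        (vb) += (vertsize);                        \
      } while (0)

    /* Hypothetical emitter in the spirit of the TAG()-generated functions:
     * three vertices are copied back to back into one contiguous buffer. */
    static void emit_tri_sketch(GLuint *vb, unsigned vertsize,
                                const void *v0, const void *v1, const void *v2)
    {
      unsigned j;
      COPY_DWORDS(j, vb, vertsize, v0);
      COPY_DWORDS(j, vb, vertsize, v1);
      COPY_DWORDS(j, vb, vertsize, v2);
    }
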
|
/external/f2fs-tools/tools/sg_write_buffer/ |
D | sg_pt_linux_nvme.c |
      194  int vb)    in mk_sense_asc_ascq()  argument
      203  if (vb)    in mk_sense_asc_ascq()
      211  if (vb > 3)    in mk_sense_asc_ascq()
      217  mk_sense_from_nvme_status(struct sg_pt_linux_scsi * ptp, int vb)    in mk_sense_from_nvme_status()  argument
      242  if (vb > 3)    in mk_sense_from_nvme_status()
      250  int in_bit, int vb)    in mk_sense_invalid_fld()  argument
      261  if (vb)    in mk_sense_invalid_fld()
      286  if (vb > 3)    in mk_sense_invalid_fld()
      301  int time_secs, int vb)    in do_nvme_admin_cmd()  argument
      311  if (vb > 2) {    in do_nvme_admin_cmd()
      [all …]
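
In this file, vb is a verbosity level passed into the SCSI-to-NVMe translation helpers and used only to gate diagnostic output (if (vb), if (vb > 3)). A small sketch of that gating convention, with a hypothetical helper name rather than the file's real API:

    #include <stdio.h>

    /* Hypothetical helper showing the verbosity convention of the mk_sense_*
     * and do_nvme_admin_cmd() helpers above: vb == 0 is silent, any nonzero
     * vb prints a one-line summary, vb > 3 adds debug detail. */
    static void report_sense_sketch(int asc, int ascq, int vb)
    {
        if (vb)
            fprintf(stderr, "sense: asc=0x%x, ascq=0x%x\n", asc, ascq);
        if (vb > 3)
            fprintf(stderr, "  (verbose detail would be dumped here, vb=%d)\n", vb);
    }
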
|
/external/python/cpython2/Lib/ctypes/test/ |
D | test_wintypes.py |
      16  vb = wintypes.VARIANT_BOOL()
      17  self.assertIs(vb.value, False)
      18  vb.value = True
      19  self.assertIs(vb.value, True)
      20  vb.value = true_value
      21  self.assertIs(vb.value, True)
      30  vb = wintypes.VARIANT_BOOL()
      31  vb.value = set_value
      32  self.assertIs(vb.value, True)
      34  vb = wintypes.VARIANT_BOOL()
      [all …]
|
/external/python/cpython3/Lib/ctypes/test/ |
D | test_wintypes.py |
      16  vb = wintypes.VARIANT_BOOL()
      17  self.assertIs(vb.value, False)
      18  vb.value = True
      19  self.assertIs(vb.value, True)
      20  vb.value = true_value
      21  self.assertIs(vb.value, True)
      30  vb = wintypes.VARIANT_BOOL()
      31  vb.value = set_value
      32  self.assertIs(vb.value, True)
      34  vb = wintypes.VARIANT_BOOL()
      [all …]
|
/external/clang/test/PCH/ |
D | cxx1y-variable-templates.cpp |
      38   template<typename T> extern T vb;
      55   template<typename T> T vb = T();    variable
      56   template<> constexpr float vb<float> = 1.5;    variable
      69   template<typename T> T vb = T(10);    variable
      70   template<> extern float vb<float>;
      88   template<typename T> T vb = T(100);    variable
      104  template<> float vb<float> = 1.5;    variable
      105  template int vb<int>;    variable
      134  template<> constexpr float vb<float> = 2.5;    variable
      135  template const int vb<const int>;    variable
      [all …]
|
/external/mesa3d/src/gallium/drivers/svga/ |
D | svga_state_vdecl.c |
      70  const struct pipe_vertex_buffer *vb =    in emit_hw_vs_vdecl()  local
      71  &svga->curr.vb[ve[i].vertex_buffer_index];    in emit_hw_vs_vdecl()
      73  unsigned int offset = vb->buffer_offset + ve[i].src_offset;    in emit_hw_vs_vdecl()
      76  if (!vb->buffer.resource)    in emit_hw_vs_vdecl()
      79  buffer = svga_buffer(vb->buffer.resource);    in emit_hw_vs_vdecl()
      82  if (vb->stride)    in emit_hw_vs_vdecl()
      83  tmp_neg_bias = (tmp_neg_bias + vb->stride - 1) / vb->stride;    in emit_hw_vs_vdecl()
      89  const struct pipe_vertex_buffer *vb =    in emit_hw_vs_vdecl()  local
      90  &svga->curr.vb[ve[i].vertex_buffer_index];    in emit_hw_vs_vdecl()
      94  if (!vb->buffer.resource)    in emit_hw_vs_vdecl()
      [all …]
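
Line 83 is the key computation: it converts a byte offset into a whole number of vertices by rounding up against the vertex buffer's stride, with line 82 guarding the zero-stride case. A tiny sketch of that ceiling-division idiom, with illustrative names:

    /* Illustrative version of the round-up at line 83: convert a byte bias
     * into a whole number of vertices of size 'stride', rounding up so the
     * bias always covers the full byte offset.  Zero-stride (constant) data
     * contributes no per-vertex bias, as guarded at line 82. */
    static unsigned bytes_to_vertices_round_up(unsigned byte_bias, unsigned stride)
    {
       if (stride == 0)
          return 0;
       return (byte_bias + stride - 1) / stride;   /* ceil(byte_bias / stride) */
    }
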
|
/external/XNNPACK/src/f32-spmm/gen/ |
D | 16x4-neonfma.c |
      64  const float32x4_t vb = vld1q_f32(w); w += 4;    in xnn_f32_spmm_ukernel_16x4__neonfma()  local
      66  vacc0123c0 = vfmaq_laneq_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      67  vacc4567c0 = vfmaq_laneq_f32(vacc4567c0, va4567, vb, 0);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      68  vacc89ABc0 = vfmaq_laneq_f32(vacc89ABc0, va89AB, vb, 0);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      69  vaccCDEFc0 = vfmaq_laneq_f32(vaccCDEFc0, vaCDEF, vb, 0);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      70  vacc0123c1 = vfmaq_laneq_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      71  vacc4567c1 = vfmaq_laneq_f32(vacc4567c1, va4567, vb, 1);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      72  vacc89ABc1 = vfmaq_laneq_f32(vacc89ABc1, va89AB, vb, 1);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      73  vaccCDEFc1 = vfmaq_laneq_f32(vaccCDEFc1, vaCDEF, vb, 1);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      74  vacc0123c2 = vfmaq_laneq_f32(vacc0123c2, va0123, vb, 2);    in xnn_f32_spmm_ukernel_16x4__neonfma()
      [all …]
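
These generated XNNPACK kernels compute a sparse-matrix times dense-matrix product: each group of four per-output-channel weights is loaded once into vb and fused-multiply-added lane by lane into the accumulators of a 16-row input block. A hedged sketch of one such step for the first output column only, assuming AArch64 NEON with FMA; the real kernel's loop structure and pointer bookkeeping are omitted:

    #include <arm_neon.h>

    /* Sketch of one inner step of the 16x4 kernel: 'vb' holds the weights for
     * four output columns; each lane is fused-multiply-added against a 16-row
     * block of inputs.  Only column 0 (lane 0) is shown here; the generated
     * code repeats the pattern for lanes 1..3. */
    static inline void spmm_16x4_step_c0_sketch(const float *w, const float *a,
                                                float32x4_t acc[4])
    {
      const float32x4_t vb     = vld1q_f32(w);      /* weights for 4 columns   */
      const float32x4_t va0123 = vld1q_f32(a + 0);  /* 16 input rows in 4 regs */
      const float32x4_t va4567 = vld1q_f32(a + 4);
      const float32x4_t va89AB = vld1q_f32(a + 8);
      const float32x4_t vaCDEF = vld1q_f32(a + 12);
      acc[0] = vfmaq_laneq_f32(acc[0], va0123, vb, 0);  /* acc += va * vb[0] */
      acc[1] = vfmaq_laneq_f32(acc[1], va4567, vb, 0);
      acc[2] = vfmaq_laneq_f32(acc[2], va89AB, vb, 0);
      acc[3] = vfmaq_laneq_f32(acc[3], vaCDEF, vb, 0);
    }
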
|
D | 12x4-neonfma.c |
      58  const float32x4_t vb = vld1q_f32(w); w += 4;    in xnn_f32_spmm_ukernel_12x4__neonfma()  local
      60  vacc0123c0 = vfmaq_laneq_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      61  vacc4567c0 = vfmaq_laneq_f32(vacc4567c0, va4567, vb, 0);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      62  vacc89ABc0 = vfmaq_laneq_f32(vacc89ABc0, va89AB, vb, 0);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      63  vacc0123c1 = vfmaq_laneq_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      64  vacc4567c1 = vfmaq_laneq_f32(vacc4567c1, va4567, vb, 1);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      65  vacc89ABc1 = vfmaq_laneq_f32(vacc89ABc1, va89AB, vb, 1);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      66  vacc0123c2 = vfmaq_laneq_f32(vacc0123c2, va0123, vb, 2);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      67  vacc4567c2 = vfmaq_laneq_f32(vacc4567c2, va4567, vb, 2);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      68  vacc89ABc2 = vfmaq_laneq_f32(vacc89ABc2, va89AB, vb, 2);    in xnn_f32_spmm_ukernel_12x4__neonfma()
      [all …]
|
D | 16x2-neonfma.c |
      56   const float32x2_t vb = vld1_f32(w); w += 2;    in xnn_f32_spmm_ukernel_16x2__neonfma()  local
      58   vacc0123c0 = vfmaq_lane_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      59   vacc4567c0 = vfmaq_lane_f32(vacc4567c0, va4567, vb, 0);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      60   vacc89ABc0 = vfmaq_lane_f32(vacc89ABc0, va89AB, vb, 0);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      61   vaccCDEFc0 = vfmaq_lane_f32(vaccCDEFc0, vaCDEF, vb, 0);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      62   vacc0123c1 = vfmaq_lane_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      63   vacc4567c1 = vfmaq_lane_f32(vacc4567c1, va4567, vb, 1);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      64   vacc89ABc1 = vfmaq_lane_f32(vacc89ABc1, va89AB, vb, 1);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      65   vaccCDEFc1 = vfmaq_lane_f32(vaccCDEFc1, vaCDEF, vb, 1);    in xnn_f32_spmm_ukernel_16x2__neonfma()
      114  const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_16x2__neonfma()  local
      [all …]
|
D | 8x4-neonfma.c |
      53   const float32x4_t vb = vld1q_f32(w); w += 4;    in xnn_f32_spmm_ukernel_8x4__neonfma()  local
      55   vacc0123c0 = vfmaq_laneq_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      56   vacc4567c0 = vfmaq_laneq_f32(vacc4567c0, va4567, vb, 0);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      57   vacc0123c1 = vfmaq_laneq_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      58   vacc4567c1 = vfmaq_laneq_f32(vacc4567c1, va4567, vb, 1);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      59   vacc0123c2 = vfmaq_laneq_f32(vacc0123c2, va0123, vb, 2);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      60   vacc4567c2 = vfmaq_laneq_f32(vacc4567c2, va4567, vb, 2);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      61   vacc0123c3 = vfmaq_laneq_f32(vacc0123c3, va0123, vb, 3);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      62   vacc4567c3 = vfmaq_laneq_f32(vacc4567c3, va4567, vb, 3);    in xnn_f32_spmm_ukernel_8x4__neonfma()
      107  const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_8x4__neonfma()  local
      [all …]
|
D | 12x2-neonfma.c |
      52   const float32x2_t vb = vld1_f32(w); w += 2;    in xnn_f32_spmm_ukernel_12x2__neonfma()  local
      54   vacc0123c0 = vfmaq_lane_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      55   vacc4567c0 = vfmaq_lane_f32(vacc4567c0, va4567, vb, 0);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      56   vacc89ABc0 = vfmaq_lane_f32(vacc89ABc0, va89AB, vb, 0);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      57   vacc0123c1 = vfmaq_lane_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      58   vacc4567c1 = vfmaq_lane_f32(vacc4567c1, va4567, vb, 1);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      59   vacc89ABc1 = vfmaq_lane_f32(vacc89ABc1, va89AB, vb, 1);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      100  const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_12x2__neonfma()  local
      101  vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      102  vacc4567 = vfmaq_f32(vacc4567, va4567, vb);    in xnn_f32_spmm_ukernel_12x2__neonfma()
      [all …]
|
D | 8x2-neonfma.c |
      49   const float32x2_t vb = vld1_f32(w); w += 2;    in xnn_f32_spmm_ukernel_8x2__neonfma()  local
      51   vacc0123c0 = vfmaq_lane_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      52   vacc4567c0 = vfmaq_lane_f32(vacc4567c0, va4567, vb, 0);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      53   vacc0123c1 = vfmaq_lane_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      54   vacc4567c1 = vfmaq_lane_f32(vacc4567c1, va4567, vb, 1);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      87   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_8x2__neonfma()  local
      88   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      89   vacc4567 = vfmaq_f32(vacc4567, va4567, vb);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      124  const float32x2_t vb = vld1_f32(w); w += 2;    in xnn_f32_spmm_ukernel_8x2__neonfma()  local
      126  vacc0123c0 = vfmaq_lane_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_8x2__neonfma()
      [all …]
|
D | 4x4-neonfma.c |
      48   const float32x4_t vb = vld1q_f32(w); w += 4;    in xnn_f32_spmm_ukernel_4x4__neonfma()  local
      50   vacc0123c0 = vfmaq_laneq_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      51   vacc0123c1 = vfmaq_laneq_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      52   vacc0123c2 = vfmaq_laneq_f32(vacc0123c2, va0123, vb, 2);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      53   vacc0123c3 = vfmaq_laneq_f32(vacc0123c3, va0123, vb, 3);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      84   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_4x4__neonfma()  local
      85   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      119  const float32x4_t vb = vld1q_f32(w); w += 4;    in xnn_f32_spmm_ukernel_4x4__neonfma()  local
      121  vacc01c0 = vfma_laneq_f32(vacc01c0, va01, vb, 0);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      122  vacc01c1 = vfma_laneq_f32(vacc01c1, va01, vb, 1);    in xnn_f32_spmm_ukernel_4x4__neonfma()
      [all …]
|
D | 4x2-neonfma.c |
      46   const float32x2_t vb = vld1_f32(w); w += 2;    in xnn_f32_spmm_ukernel_4x2__neonfma()  local
      48   vacc0123c0 = vfmaq_lane_f32(vacc0123c0, va0123, vb, 0);    in xnn_f32_spmm_ukernel_4x2__neonfma()
      49   vacc0123c1 = vfmaq_lane_f32(vacc0123c1, va0123, vb, 1);    in xnn_f32_spmm_ukernel_4x2__neonfma()
      74   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_4x2__neonfma()  local
      75   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_4x2__neonfma()
      107  const float32x2_t vb = vld1_f32(w); w += 2;    in xnn_f32_spmm_ukernel_4x2__neonfma()  local
      109  vacc01c0 = vfma_lane_f32(vacc01c0, va01, vb, 0);    in xnn_f32_spmm_ukernel_4x2__neonfma()
      110  vacc01c1 = vfma_lane_f32(vacc01c1, va01, vb, 1);    in xnn_f32_spmm_ukernel_4x2__neonfma()
      135  const float32x2_t vb = vld1_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_4x2__neonfma()  local
      136  vacc01 = vfma_f32(vacc01, va01, vb);    in xnn_f32_spmm_ukernel_4x2__neonfma()
      [all …]
|
D | 8x1-sse.c |
      47   const __m128 vb = _mm_load1_ps(w); w += 1;    in xnn_f32_spmm_ukernel_8x1__sse()  local
      48   vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(va0123, vb));    in xnn_f32_spmm_ukernel_8x1__sse()
      49   vacc4567 = _mm_add_ps(vacc4567, _mm_mul_ps(va4567, vb));    in xnn_f32_spmm_ukernel_8x1__sse()
      79   const __m128 vb = _mm_load1_ps(w); w += 1;    in xnn_f32_spmm_ukernel_8x1__sse()  local
      80   vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(va0123, vb));    in xnn_f32_spmm_ukernel_8x1__sse()
      106  __m128 vb = _mm_load_ss(w); w += 1;    in xnn_f32_spmm_ukernel_8x1__sse()  local
      107  vb = _mm_unpacklo_ps(vb, vb);    in xnn_f32_spmm_ukernel_8x1__sse()
      108  vacc01 = _mm_add_ps(vacc01, _mm_mul_ps(va01, vb));    in xnn_f32_spmm_ukernel_8x1__sse()
      133  const __m128 vb = _mm_load_ss(w); w += 1;    in xnn_f32_spmm_ukernel_8x1__sse()  local
      134  vacc0 = _mm_add_ss(vacc0, _mm_mul_ss(va0, vb));    in xnn_f32_spmm_ukernel_8x1__sse()
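
The SSE variant has no fused multiply-add or lane-indexed broadcast, so each weight is expanded to a full vector before the separate multiply and add. Two broadcast idioms appear above: _mm_load1_ps for the full 4-lane rows, and a cheaper _mm_load_ss plus _mm_unpacklo_ps pair for the 2-row remainder, where only the low lanes are stored. A short sketch of both; the helper names are illustrative:

    #include <xmmintrin.h>

    /* Full broadcast, as at lines 47/79: every lane holds the same weight. */
    static inline __m128 broadcast4_sketch(const float *w)
    {
      return _mm_load1_ps(w);            /* {w[0], w[0], w[0], w[0]} */
    }

    /* Partial broadcast, as at lines 106-107: only the low two lanes are
     * needed for the 2-row remainder, so unpacking the scalar with itself
     * ({w, 0, 0, 0} -> {w, w, 0, 0}) is sufficient. */
    static inline __m128 broadcast2_sketch(const float *w)
    {
      __m128 vb = _mm_load_ss(w);        /* {w[0], 0, 0, 0} */
      return _mm_unpacklo_ps(vb, vb);    /* {w[0], w[0], 0, 0} */
    }
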
|
D | 16x1-neonfma.c |
      52   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_16x1__neonfma()  local
      53   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      54   vacc4567 = vfmaq_f32(vacc4567, va4567, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      55   vacc89AB = vfmaq_f32(vacc89AB, va89AB, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      56   vaccCDEF = vfmaq_f32(vaccCDEF, vaCDEF, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      94   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_16x1__neonfma()  local
      95   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      96   vacc4567 = vfmaq_f32(vacc4567, va4567, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      124  const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_16x1__neonfma()  local
      125  vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_16x1__neonfma()
      [all …]
|
D | 12x1-neonfma.c |
      49   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_12x1__neonfma()  local
      50   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_12x1__neonfma()
      51   vacc4567 = vfmaq_f32(vacc4567, va4567, vb);    in xnn_f32_spmm_ukernel_12x1__neonfma()
      52   vacc89AB = vfmaq_f32(vacc89AB, va89AB, vb);    in xnn_f32_spmm_ukernel_12x1__neonfma()
      87   const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_12x1__neonfma()  local
      88   vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_12x1__neonfma()
      89   vacc4567 = vfmaq_f32(vacc4567, va4567, vb);    in xnn_f32_spmm_ukernel_12x1__neonfma()
      117  const float32x4_t vb = vld1q_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_12x1__neonfma()  local
      118  vacc0123 = vfmaq_f32(vacc0123, va0123, vb);    in xnn_f32_spmm_ukernel_12x1__neonfma()
      143  const float32x2_t vb = vld1_dup_f32(w); w += 1;    in xnn_f32_spmm_ukernel_12x1__neonfma()  local
      [all …]
|
D | 8x1-scalar.c |
      119  const float vb = *w++;    in xnn_f32_spmm_ukernel_8x1__scalar()  local
      120  vacc0 += va0 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      121  vacc1 += va1 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      122  vacc2 += va2 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      123  vacc3 += va3 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      124  vacc4 += va4 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      125  vacc5 += va5 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      126  vacc6 += va6 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      127  vacc7 += va7 * vb;    in xnn_f32_spmm_ukernel_8x1__scalar()
      220  const float vb = *w++;    in xnn_f32_spmm_ukernel_8x1__scalar()  local
      [all …]
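
The scalar kernel spells out what all of these microkernels compute: for each output channel, every nonzero weight vb is multiplied into a block of (here up to eight) input rows and accumulated. A hedged reference implementation using a simplified CSR-style layout instead of XNNPACK's pointer-difference map; the name, signature, and layout here are illustrative only:

    #include <stddef.h>
    #include <stdint.h>

    /* Reference sketch: for each of 'n_channels' output channels, accumulate
     * its 'nnz[ch]' nonzero weights against the matching input rows for a
     * block of 'mr' (<= 8) batch elements, as in lines 119-127 above. */
    static void spmm_ref_sketch(size_t mr, size_t n_channels,
                                const float *a, size_t lda,   /* dense input rows  */
                                const float *w,               /* nonzero weights   */
                                const uint32_t *row_idx,      /* input row per nnz */
                                const uint32_t *nnz,          /* nnz per channel   */
                                float *c, size_t ldc)         /* output            */
    {
      size_t k = 0;
      for (size_t ch = 0; ch < n_channels; ch++) {
        float acc[8] = {0.0f};
        for (uint32_t j = 0; j < nnz[ch]; j++, k++) {
          const float vb = w[k];                       /* one broadcast weight */
          const float *va = &a[(size_t)row_idx[k] * lda];
          for (size_t m = 0; m < mr; m++)
            acc[m] += va[m] * vb;
        }
        for (size_t m = 0; m < mr; m++)
          c[ch * ldc + m] = acc[m];
      }
    }
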
|
D | 4x1-sse.c |
      45   const __m128 vb = _mm_load1_ps(w); w += 1;    in xnn_f32_spmm_ukernel_4x1__sse()  local
      46   vacc0123 = _mm_add_ps(vacc0123, _mm_mul_ps(va0123, vb));    in xnn_f32_spmm_ukernel_4x1__sse()
      74   __m128 vb = _mm_load_ss(w); w += 1;    in xnn_f32_spmm_ukernel_4x1__sse()  local
      75   vb = _mm_unpacklo_ps(vb, vb);    in xnn_f32_spmm_ukernel_4x1__sse()
      76   vacc01 = _mm_add_ps(vacc01, _mm_mul_ps(va01, vb));    in xnn_f32_spmm_ukernel_4x1__sse()
      101  const __m128 vb = _mm_load_ss(w); w += 1;    in xnn_f32_spmm_ukernel_4x1__sse()  local
      102  vacc0 = _mm_add_ss(vacc0, _mm_mul_ss(va0, vb));    in xnn_f32_spmm_ukernel_4x1__sse()
|
/external/clang/test/SemaCXX/ |
D | destructor.cpp |
      244  VB vb; use(vb);    in nowarnstack()  local
      271  VB* vb = new VB[4];    in nowarnarray()  local
      272  delete[] vb;    in nowarnarray()
      298  VB* vb = new VB();    in nowarn0()  local
      299  delete vb;    in nowarn0()
      302  VB* vb = new VD();    in nowarn0()  local
      303  delete vb;    in nowarn0()
      315  void nowarn0_explicit_dtor(F* f, VB* vb, VD* vd, VF* vf) {    in nowarn0_explicit_dtor()  argument
      318  vb->~VB();    in nowarn0_explicit_dtor()
      355  simple_ptr<VB> vb(new VB());    in nowarn1()  local
      [all …]
|
/external/mesa3d/src/gallium/auxiliary/vl/ |
D | vl_compositor_gfx.c |
      471  gen_rect_verts(struct vertex2f *vb, struct vl_compositor_layer *layer)    in gen_rect_verts()  argument
      475  assert(vb && layer);    in gen_rect_verts()
      513  vb[ 0].x = tl.x;    in gen_rect_verts()
      514  vb[ 0].y = tl.y;    in gen_rect_verts()
      515  vb[ 1].x = layer->src.tl.x;    in gen_rect_verts()
      516  vb[ 1].y = layer->src.tl.y;    in gen_rect_verts()
      517  vb[ 2] = layer->zw;    in gen_rect_verts()
      518  vb[ 3].x = layer->colors[0].x;    in gen_rect_verts()
      519  vb[ 3].y = layer->colors[0].y;    in gen_rect_verts()
      520  vb[ 4].x = layer->colors[0].z;    in gen_rect_verts()
      [all …]
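
gen_rect_verts() fills an interleaved vertex2f stream for one composited quad: destination position, source texture coordinate, a zw attribute, and color components per corner (lines 513-520 show the first corner). A trimmed, illustrative sketch of the same write pattern with only position and texcoord per corner; this is not the compositor's actual vertex layout:

    /* Simplified sketch of the gen_rect_verts() write pattern: one quad as a
     * flat stream of vertex2f pairs (position, then texcoord, per corner). */
    struct vertex2f { float x, y; };

    static void gen_rect_verts_sketch(struct vertex2f *vb,
                                      struct vertex2f dst_tl, struct vertex2f dst_br,
                                      struct vertex2f src_tl, struct vertex2f src_br)
    {
       vb[0] = dst_tl;  vb[1] = src_tl;             /* top-left corner     */
       vb[2].x = dst_br.x;  vb[2].y = dst_tl.y;     /* top-right corner    */
       vb[3].x = src_br.x;  vb[3].y = src_tl.y;
       vb[4] = dst_br;  vb[5] = src_br;             /* bottom-right corner */
       vb[6].x = dst_tl.x;  vb[6].y = dst_br.y;     /* bottom-left corner  */
       vb[7].x = src_tl.x;  vb[7].y = src_br.y;
    }
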
|
/external/XNNPACK/src/f16-spmm/gen/ |
D | 32x1-neonfp16arith.c |
      56   const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()  local
      57   vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      58   vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      59   vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      60   vaccOPQRSTUV = vfmaq_f16(vaccOPQRSTUV, vaOPQRSTUV, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      102  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()  local
      103  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      104  vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      132  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()  local
      133  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith()
      [all …]
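
The half-precision kernels follow the same single-column pattern as the f32 ones: one fp16 weight is broadcast to all eight lanes with vld1q_dup_f16 and fused-multiply-added into each 8-wide accumulator of the row block. A sketch of one such step, assuming an ARMv8.2-A target with the fp16 vector-arithmetic extension; loop, pointer bookkeeping and remainder paths are omitted:

    #include <arm_neon.h>   /* requires __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

    /* Sketch of one 32x1 fp16 step (cf. lines 56-60): broadcast a single
     * half-precision weight and accumulate it into four 8-wide accumulators
     * covering a 32-row input block. */
    static inline void spmm_32x1_f16_step_sketch(const float16_t *w,
                                                 const float16_t *a,
                                                 float16x8_t acc[4])
    {
      const float16x8_t vb = vld1q_dup_f16(w);   /* broadcast one weight */
      acc[0] = vfmaq_f16(acc[0], vld1q_f16(a +  0), vb);
      acc[1] = vfmaq_f16(acc[1], vld1q_f16(a +  8), vb);
      acc[2] = vfmaq_f16(acc[2], vld1q_f16(a + 16), vb);
      acc[3] = vfmaq_f16(acc[3], vld1q_f16(a + 24), vb);
    }
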
|
D | 24x1-neonfp16arith.c |
      54   const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()  local
      55   vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
      56   vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
      57   vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
      95   const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()  local
      96   vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
      97   vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
      125  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()  local
      126  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()
      151  const float16x4_t vb = vld1_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith()  local
      [all …]
|
D | 24x1-neonfp16arith-unroll2.c |
      84   const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()  local
      85   vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
      86   vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
      87   vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
      125  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()  local
      126  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
      127  vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
      155  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()  local
      156  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()
      181  const float16x4_t vb = vld1_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_24x1__neonfp16arith_unroll2()  local
      [all …]
|
D | 32x1-neonfp16arith-unroll2.c |
      93   const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()  local
      94   vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      95   vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      96   vaccGHIJKLMN = vfmaq_f16(vaccGHIJKLMN, vaGHIJKLMN, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      97   vaccOPQRSTUV = vfmaq_f16(vaccOPQRSTUV, vaOPQRSTUV, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      139  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()  local
      140  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      141  vacc89ABCDEF = vfmaq_f16(vacc89ABCDEF, va89ABCDEF, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      169  const float16x8_t vb = vld1q_dup_f16(w); w += 1;    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()  local
      170  vacc01234567 = vfmaq_f16(vacc01234567, va01234567, vb);    in xnn_f16_spmm_ukernel_32x1__neonfp16arith_unroll2()
      [all …]
|