// Auto-generated file. Do not edit!
//   Template: src/f32-igemm/neon-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>


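// IGEMM microkernel: accumulates an up-to-8-row by 8-column output tile from an
// indirection buffer of input row pointers, using NEON multiply-accumulate with
// an "s4" shuffled weight layout, then clamps the result to [min, max].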
void xnn_f32_igemm_minmax_ukernel_8x8s4__neon(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (8 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

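  // Set up the 8 output row pointers; when mr < 8 the unused rows alias the
  // previous row, so their stores stay in-bounds and simply overwrite the same data.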
  float* c0 = c;
  float* c1 = (float*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  float* c2 = (float*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  float* c3 = (float*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  float* c4 = (float*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  float* c5 = (float*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  float* c6 = (float*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }
  float* c7 = (float*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    c7 = c6;
  }

  do {
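    // Initialize all accumulator rows from the packed bias stored at the start of w.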
    float32x4_t vacc0x0123 = vld1q_f32(w); w += 4;
    float32x4_t vacc0x4567 = vld1q_f32(w); w += 4;
    float32x4_t vacc1x0123 = vacc0x0123;
    float32x4_t vacc1x4567 = vacc0x4567;
    float32x4_t vacc2x0123 = vacc0x0123;
    float32x4_t vacc2x4567 = vacc0x4567;
    float32x4_t vacc3x0123 = vacc0x0123;
    float32x4_t vacc3x4567 = vacc0x4567;
    float32x4_t vacc4x0123 = vacc0x0123;
    float32x4_t vacc4x4567 = vacc0x4567;
    float32x4_t vacc5x0123 = vacc0x0123;
    float32x4_t vacc5x4567 = vacc0x4567;
    float32x4_t vacc6x0123 = vacc0x0123;
    float32x4_t vacc6x4567 = vacc0x4567;
    float32x4_t vacc7x0123 = vacc0x0123;
    float32x4_t vacc7x4567 = vacc0x4567;

    size_t p = ks;
    do {
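      // Fetch the 8 input row pointers for this indirection step; pointers equal to
      // `zero` reference the zero buffer and are not offset by a_offset.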
      const float* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const float*) ((uintptr_t) a0 + a_offset);
      }
      const float* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const float*) ((uintptr_t) a1 + a_offset);
      }
      const float* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const float*) ((uintptr_t) a2 + a_offset);
      }
      const float* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const float*) ((uintptr_t) a3 + a_offset);
      }
      const float* restrict a4 = a[4];
      assert(a4 != NULL);
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const float*) ((uintptr_t) a4 + a_offset);
      }
      const float* restrict a5 = a[5];
      assert(a5 != NULL);
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const float*) ((uintptr_t) a5 + a_offset);
      }
      const float* restrict a6 = a[6];
      assert(a6 != NULL);
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const float*) ((uintptr_t) a6 + a_offset);
      }
      const float* restrict a7 = a[7];
      assert(a7 != NULL);
      if XNN_UNPREDICTABLE(a7 != zero) {
        a7 = (const float*) ((uintptr_t) a7 + a_offset);
      }
      a += 8;

      size_t k = kc;
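      // Main loop over K in groups of 4: after each group of multiply-accumulates the
      // input vectors are rotated one lane (vextq_f32) to match the s4-shuffled weights.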
      while (k >= 4 * sizeof(float)) {
        float32x4_t va0 = vld1q_f32(a0); a0 += 4;
        float32x4_t va1 = vld1q_f32(a1); a1 += 4;
        float32x4_t va2 = vld1q_f32(a2); a2 += 4;
        float32x4_t va3 = vld1q_f32(a3); a3 += 4;
        float32x4_t va4 = vld1q_f32(a4); a4 += 4;
        float32x4_t va5 = vld1q_f32(a5); a5 += 4;
        float32x4_t va6 = vld1q_f32(a6); a6 += 4;
        float32x4_t va7 = vld1q_f32(a7); a7 += 4;


        const float32x4_t vb0123c0 = vld1q_f32(w + 0);
        const float32x4_t vb4567c0 = vld1q_f32(w + 4);

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c0);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c0);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c0);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c0);
        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c0);
        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c0);
        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c0);
        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c0);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c0);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c0);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c0);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c0);
        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c0);
        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c0);
        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c0);
        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c0);

        va0 = vextq_f32(va0, va0, 1);
        va1 = vextq_f32(va1, va1, 1);
        va2 = vextq_f32(va2, va2, 1);
        va3 = vextq_f32(va3, va3, 1);
        va4 = vextq_f32(va4, va4, 1);
        va5 = vextq_f32(va5, va5, 1);
        va6 = vextq_f32(va6, va6, 1);
        va7 = vextq_f32(va7, va7, 1);

        const float32x4_t vb0123c1 = vld1q_f32(w + 8);
        const float32x4_t vb4567c1 = vld1q_f32(w + 12);

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c1);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c1);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c1);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c1);
        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c1);
        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c1);
        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c1);
        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c1);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c1);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c1);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c1);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c1);
        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c1);
        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c1);
        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c1);
        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c1);

        va0 = vextq_f32(va0, va0, 1);
        va1 = vextq_f32(va1, va1, 1);
        va2 = vextq_f32(va2, va2, 1);
        va3 = vextq_f32(va3, va3, 1);
        va4 = vextq_f32(va4, va4, 1);
        va5 = vextq_f32(va5, va5, 1);
        va6 = vextq_f32(va6, va6, 1);
        va7 = vextq_f32(va7, va7, 1);

        const float32x4_t vb0123c2 = vld1q_f32(w + 16);
        const float32x4_t vb4567c2 = vld1q_f32(w + 20);

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c2);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c2);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c2);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c2);
        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c2);
        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c2);
        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c2);
        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c2);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c2);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c2);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c2);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c2);
        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c2);
        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c2);
        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c2);
        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c2);

        va0 = vextq_f32(va0, va0, 1);
        va1 = vextq_f32(va1, va1, 1);
        va2 = vextq_f32(va2, va2, 1);
        va3 = vextq_f32(va3, va3, 1);
        va4 = vextq_f32(va4, va4, 1);
        va5 = vextq_f32(va5, va5, 1);
        va6 = vextq_f32(va6, va6, 1);
        va7 = vextq_f32(va7, va7, 1);

        const float32x4_t vb0123c3 = vld1q_f32(w + 24);
        const float32x4_t vb4567c3 = vld1q_f32(w + 28);

        vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123c3);
        vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123c3);
        vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123c3);
        vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123c3);
        vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123c3);
        vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123c3);
        vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123c3);
        vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123c3);
        vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567c3);
        vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567c3);
        vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567c3);
        vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567c3);
        vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567c3);
        vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567c3);
        vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567c3);
        vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567c3);


        w += 32;
        k -= 4 * sizeof(float);
      }
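      // Remainder loop: process any leftover K elements (kc not a multiple of 4)
      // one element at a time, broadcasting each input value across all lanes.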
      if XNN_UNLIKELY(k != 0) {
        do {
          const float32x4_t va0 = vld1q_dup_f32(a0); a0 += 1;
          const float32x4_t va1 = vld1q_dup_f32(a1); a1 += 1;
          const float32x4_t va2 = vld1q_dup_f32(a2); a2 += 1;
          const float32x4_t va3 = vld1q_dup_f32(a3); a3 += 1;
          const float32x4_t va4 = vld1q_dup_f32(a4); a4 += 1;
          const float32x4_t va5 = vld1q_dup_f32(a5); a5 += 1;
          const float32x4_t va6 = vld1q_dup_f32(a6); a6 += 1;
          const float32x4_t va7 = vld1q_dup_f32(a7); a7 += 1;

          const float32x4_t vb0123 = vld1q_f32(w); w += 4;
          const float32x4_t vb4567 = vld1q_f32(w); w += 4;

          vacc0x0123 = vmlaq_f32(vacc0x0123, va0, vb0123);
          vacc1x0123 = vmlaq_f32(vacc1x0123, va1, vb0123);
          vacc2x0123 = vmlaq_f32(vacc2x0123, va2, vb0123);
          vacc3x0123 = vmlaq_f32(vacc3x0123, va3, vb0123);
          vacc4x0123 = vmlaq_f32(vacc4x0123, va4, vb0123);
          vacc5x0123 = vmlaq_f32(vacc5x0123, va5, vb0123);
          vacc6x0123 = vmlaq_f32(vacc6x0123, va6, vb0123);
          vacc7x0123 = vmlaq_f32(vacc7x0123, va7, vb0123);
          vacc0x4567 = vmlaq_f32(vacc0x4567, va0, vb4567);
          vacc1x4567 = vmlaq_f32(vacc1x4567, va1, vb4567);
          vacc2x4567 = vmlaq_f32(vacc2x4567, va2, vb4567);
          vacc3x4567 = vmlaq_f32(vacc3x4567, va3, vb4567);
          vacc4x4567 = vmlaq_f32(vacc4x4567, va4, vb4567);
          vacc5x4567 = vmlaq_f32(vacc5x4567, va5, vb4567);
          vacc6x4567 = vmlaq_f32(vacc6x4567, va6, vb4567);
          vacc7x4567 = vmlaq_f32(vacc7x4567, va7, vb4567);

          k -= sizeof(float);
        } while (k != 0);
      }

      p -= 8 * sizeof(void*);
    } while (p != 0);

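    // Clamp the accumulators to the [min, max] range supplied in params.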
    const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
    vacc0x0123 = vminq_f32(vacc0x0123, vmax);
    vacc1x0123 = vminq_f32(vacc1x0123, vmax);
    vacc2x0123 = vminq_f32(vacc2x0123, vmax);
    vacc3x0123 = vminq_f32(vacc3x0123, vmax);
    vacc4x0123 = vminq_f32(vacc4x0123, vmax);
    vacc5x0123 = vminq_f32(vacc5x0123, vmax);
    vacc6x0123 = vminq_f32(vacc6x0123, vmax);
    vacc7x0123 = vminq_f32(vacc7x0123, vmax);
    vacc0x4567 = vminq_f32(vacc0x4567, vmax);
    vacc1x4567 = vminq_f32(vacc1x4567, vmax);
    vacc2x4567 = vminq_f32(vacc2x4567, vmax);
    vacc3x4567 = vminq_f32(vacc3x4567, vmax);
    vacc4x4567 = vminq_f32(vacc4x4567, vmax);
    vacc5x4567 = vminq_f32(vacc5x4567, vmax);
    vacc6x4567 = vminq_f32(vacc6x4567, vmax);
    vacc7x4567 = vminq_f32(vacc7x4567, vmax);

    const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
    vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
    vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
    vacc2x0123 = vmaxq_f32(vacc2x0123, vmin);
    vacc3x0123 = vmaxq_f32(vacc3x0123, vmin);
    vacc4x0123 = vmaxq_f32(vacc4x0123, vmin);
    vacc5x0123 = vmaxq_f32(vacc5x0123, vmin);
    vacc6x0123 = vmaxq_f32(vacc6x0123, vmin);
    vacc7x0123 = vmaxq_f32(vacc7x0123, vmin);
    vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
    vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);
    vacc2x4567 = vmaxq_f32(vacc2x4567, vmin);
    vacc3x4567 = vmaxq_f32(vacc3x4567, vmin);
    vacc4x4567 = vmaxq_f32(vacc4x4567, vmin);
    vacc5x4567 = vmaxq_f32(vacc5x4567, vmin);
    vacc6x4567 = vmaxq_f32(vacc6x4567, vmin);
    vacc7x4567 = vmaxq_f32(vacc7x4567, vmin);

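    // Store the output tile (rows written from c7 down to c0). The full-width path
    // writes all 8 columns per row; the tail path handles the final 4/2/1 columns when nc < 8.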
    if XNN_LIKELY(nc >= 8) {
      vst1q_f32(c7, vacc7x0123);
      vst1q_f32(c7 + 4, vacc7x4567);
      c7 = (float*) ((uintptr_t) c7 + cn_stride);
      vst1q_f32(c6, vacc6x0123);
      vst1q_f32(c6 + 4, vacc6x4567);
      c6 = (float*) ((uintptr_t) c6 + cn_stride);
      vst1q_f32(c5, vacc5x0123);
      vst1q_f32(c5 + 4, vacc5x4567);
      c5 = (float*) ((uintptr_t) c5 + cn_stride);
      vst1q_f32(c4, vacc4x0123);
      vst1q_f32(c4 + 4, vacc4x4567);
      c4 = (float*) ((uintptr_t) c4 + cn_stride);
      vst1q_f32(c3, vacc3x0123);
      vst1q_f32(c3 + 4, vacc3x4567);
      c3 = (float*) ((uintptr_t) c3 + cn_stride);
      vst1q_f32(c2, vacc2x0123);
      vst1q_f32(c2 + 4, vacc2x4567);
      c2 = (float*) ((uintptr_t) c2 + cn_stride);
      vst1q_f32(c1, vacc1x0123);
      vst1q_f32(c1 + 4, vacc1x4567);
      c1 = (float*) ((uintptr_t) c1 + cn_stride);
      vst1q_f32(c0, vacc0x0123);
      vst1q_f32(c0 + 4, vacc0x4567);
      c0 = (float*) ((uintptr_t) c0 + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_f32(c7, vacc7x0123); c7 += 4;
        vst1q_f32(c6, vacc6x0123); c6 += 4;
        vst1q_f32(c5, vacc5x0123); c5 += 4;
        vst1q_f32(c4, vacc4x0123); c4 += 4;
        vst1q_f32(c3, vacc3x0123); c3 += 4;
        vst1q_f32(c2, vacc2x0123); c2 += 4;
        vst1q_f32(c1, vacc1x0123); c1 += 4;
        vst1q_f32(c0, vacc0x0123); c0 += 4;

        vacc7x0123 = vacc7x4567;
        vacc6x0123 = vacc6x4567;
        vacc5x0123 = vacc5x4567;
        vacc4x0123 = vacc4x4567;
        vacc3x0123 = vacc3x4567;
        vacc2x0123 = vacc2x4567;
        vacc1x0123 = vacc1x4567;
        vacc0x0123 = vacc0x4567;
      }
      float32x2_t vacc7x01 = vget_low_f32(vacc7x0123);
      float32x2_t vacc6x01 = vget_low_f32(vacc6x0123);
      float32x2_t vacc5x01 = vget_low_f32(vacc5x0123);
      float32x2_t vacc4x01 = vget_low_f32(vacc4x0123);
      float32x2_t vacc3x01 = vget_low_f32(vacc3x0123);
      float32x2_t vacc2x01 = vget_low_f32(vacc2x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      if (nc & 2) {
        vst1_f32(c7, vacc7x01); c7 += 2;
        vst1_f32(c6, vacc6x01); c6 += 2;
        vst1_f32(c5, vacc5x01); c5 += 2;
        vst1_f32(c4, vacc4x01); c4 += 2;
        vst1_f32(c3, vacc3x01); c3 += 2;
        vst1_f32(c2, vacc2x01); c2 += 2;
        vst1_f32(c1, vacc1x01); c1 += 2;
        vst1_f32(c0, vacc0x01); c0 += 2;

        vacc7x01 = vget_high_f32(vacc7x0123);
        vacc6x01 = vget_high_f32(vacc6x0123);
        vacc5x01 = vget_high_f32(vacc5x0123);
        vacc4x01 = vget_high_f32(vacc4x0123);
        vacc3x01 = vget_high_f32(vacc3x0123);
        vacc2x01 = vget_high_f32(vacc2x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
        vacc0x01 = vget_high_f32(vacc0x0123);
      }
      if (nc & 1) {
        vst1_lane_f32(c7, vacc7x01, 0);
        vst1_lane_f32(c6, vacc6x01, 0);
        vst1_lane_f32(c5, vacc5x01, 0);
        vst1_lane_f32(c4, vacc4x01, 0);
        vst1_lane_f32(c3, vacc3x01, 0);
        vst1_lane_f32(c2, vacc2x01, 0);
        vst1_lane_f32(c1, vacc1x01, 0);
        vst1_lane_f32(c0, vacc0x01, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}