// Auto-generated file. Do not edit!
//   Template: src/f32-ibilinear/neon.c.in
//   Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>

void xnn_f32_ibilinear_ukernel__neonfma_c8(
    size_t output_pixels,
    size_t channels,
    const float**restrict input,
    size_t input_offset,
    const float*restrict weights,
    float*restrict output,
    size_t output_increment) XNN_DISABLE_TSAN
{
  assert(output_pixels != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  do {
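    // i0..i3 point to the top-left, top-right, bottom-left, and bottom-right input rows for this output pixel.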
    const float* i0 = (const float*) ((uintptr_t) input[0] + input_offset);
    const float* i1 = (const float*) ((uintptr_t) input[1] + input_offset);
    const float* i2 = (const float*) ((uintptr_t) input[2] + input_offset);
    const float* i3 = (const float*) ((uintptr_t) input[3] + input_offset);
    input += 4;

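    // Load the two interpolation weights for this pixel: lane 0 holds alphah (horizontal), lane 1 holds alphav (vertical).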
    const float32x2_t valphahv = vld1_f32(weights); weights += 2;
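    // The lane form of vfmaq is AArch64-only, so on 32-bit ARM the weights are broadcast into full vectors up front.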
    #if XNN_ARCH_ARM
      const float32x4_t valphah = vdupq_lane_f32(valphahv, 0);
      const float32x4_t valphav = vdupq_lane_f32(valphahv, 1);
    #endif

    size_t c = channels;
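    // Main loop: process 8 channels per iteration.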
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4;
      const float32x4_t vtl4567 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vtr4567 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vbl4567 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vbr4567 = vld1q_f32(i3); i3 += 4;

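      // Horizontal pass: interpolate along the top and bottom edges, t = tl + alphah * (tr - tl) and b = bl + alphah * (br - bl).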
      const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123);
      const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123);
      const float32x4_t vtd4567 = vsubq_f32(vtr4567, vtl4567);
      const float32x4_t vbd4567 = vsubq_f32(vbr4567, vbl4567);

      #if XNN_ARCH_ARM
        const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah);
        const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah);
        const float32x4_t vt4567 = vfmaq_f32(vtl4567, vtd4567, valphah);
        const float32x4_t vb4567 = vfmaq_f32(vbl4567, vbd4567, valphah);
      #else
        const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
        const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
        const float32x4_t vt4567 = vfmaq_lane_f32(vtl4567, vtd4567, valphahv, 0);
        const float32x4_t vb4567 = vfmaq_lane_f32(vbl4567, vbd4567, valphahv, 0);
      #endif

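      // Vertical pass: blend the two edges, o = t + alphav * (b - t).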
      const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123);
      const float32x4_t vd4567 = vsubq_f32(vb4567, vt4567);

      #if XNN_ARCH_ARM
        const float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav);
        const float32x4_t vo4567 = vfmaq_f32(vt4567, vd4567, valphav);
      #else
        const float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1);
        const float32x4_t vo4567 = vfmaq_lane_f32(vt4567, vd4567, valphahv, 1);
      #endif

      vst1q_f32(output, vo0123); output += 4;
      vst1q_f32(output, vo4567); output += 4;
    }
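    // Handle a remaining group of 4 channels.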
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vtl0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vtr0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vbl0123 = vld1q_f32(i2); i2 += 4;
      const float32x4_t vbr0123 = vld1q_f32(i3); i3 += 4;

      const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123);
      const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123);

      #if XNN_ARCH_ARM
        const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah);
        const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah);
      #else
        const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
        const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
      #endif

      const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123);

      #if XNN_ARCH_ARM
        const float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav);
      #else
        const float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1);
      #endif

      vst1q_f32(output, vo0123);
      output += 4;
    }
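    // Handle the final 1-3 channels with partial stores.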
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vtl0123 = vld1q_f32(i0);
      const float32x4_t vtr0123 = vld1q_f32(i1);
      const float32x4_t vbl0123 = vld1q_f32(i2);
      const float32x4_t vbr0123 = vld1q_f32(i3);

      const float32x4_t vtd0123 = vsubq_f32(vtr0123, vtl0123);
      const float32x4_t vbd0123 = vsubq_f32(vbr0123, vbl0123);

      #if XNN_ARCH_ARM
        const float32x4_t vt0123 = vfmaq_f32(vtl0123, vtd0123, valphah);
        const float32x4_t vb0123 = vfmaq_f32(vbl0123, vbd0123, valphah);
      #else
        const float32x4_t vt0123 = vfmaq_lane_f32(vtl0123, vtd0123, valphahv, 0);
        const float32x4_t vb0123 = vfmaq_lane_f32(vbl0123, vbd0123, valphahv, 0);
      #endif

      const float32x4_t vd0123 = vsubq_f32(vb0123, vt0123);

      #if XNN_ARCH_ARM
        float32x4_t vo0123 = vfmaq_f32(vt0123, vd0123, valphav);
      #else
        float32x4_t vo0123 = vfmaq_lane_f32(vt0123, vd0123, valphahv, 1);
      #endif

      float32x2_t vo01 = vget_low_f32(vo0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(output, vo01); output += 2;
        vo01 = vget_high_f32(vo0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(output, vo01, 0); output += 1;
      }
    }

    output = (float*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}