#include <arm_neon.h>

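// vpaddq_u32 is an AArch64-only intrinsic; on 32-bit ARM builds a
// zero-returning placeholder is provided below, presumably just to keep
// shared code compiling (the result is never meaningful).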
#if __arm__
inline uint32x4_t vpaddq_u32(uint32x4_t, uint32x4_t)
{
    return vdupq_n_u32(0);
}
#endif

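// The remaining definitions are zero-returning placeholders for ARM NEON
// float16 arithmetic intrinsics, presumably so that code referencing them
// still compiles on toolchains without full FP16 vector-arithmetic support.
// None of them performs the operation its name suggests.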
inline float16x4_t vrsqrts_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x8_t vrsqrtsq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x4_t vpmax_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x4_t vpadd_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x8_t vmulq_lane_f16(float16x8_t, float16x4_t, const int)
{
    return vdupq_n_f16(0);
}

inline float16x4_t vmul_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x4_t vadd_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x4_t vmul_lane_f16(float16x4_t, float16x4_t, const int)
{
    return vdup_n_f16(0);
}

inline float16x4_t vmul_n_f16(float16x4_t, float16_t)
{
    return vdup_n_f16(0);
}

inline float16x4_t vmax_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x8_t vcvtq_f16_u16(uint16x8_t)
{
    return vdupq_n_f16(0);
}

inline uint16x8_t vcvtq_u16_f16(float16x8_t)
{
    return vdupq_n_u16(0);
}

inline int16x8_t vcvtq_s16_f16(float16x8_t)
{
    return vdupq_n_s16(0);
}

inline float16x8_t vaddq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vsubq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vmulq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vmulq_n_f16(float16x8_t, float16_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vfmaq_f16(float16x8_t, float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline uint16x8_t vcgeq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_u16(0);
}

inline uint16x8_t vcgtq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_u16(0);
}

inline float16x8_t vbslq_f16(uint16x8_t, float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vextq_f16(float16x8_t, float16x8_t, int)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vabsq_f16(float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vcvtq_f16_s16(int16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x4_t vbsl_f16(uint16x4_t, float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x4_t vrsqrte_f16(float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x8_t vrsqrteq_f16(float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vfmsq_f16(float16x8_t, float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x4_t vrecpe_f16(float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x8_t vrecpeq_f16(float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x4_t vrecps_f16(float16x4_t, float16x4_t)
{
    return vdup_n_f16(0);
}

inline float16x8_t vrecpsq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vmaxq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline float16x8_t vminq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_f16(0);
}

inline uint16x8_t vcltq_f16(float16x8_t, float16x8_t)
{
    return vdupq_n_u16(0);
}