/*

Copyright (c) 2009, 2010, 2011, 2013 STMicroelectronics
Written by Christophe Lyon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

*/

#if defined(__arm__) || defined(__aarch64__)
#include <arm_neon.h>
#else
#include "stm-arm-neon.h"
#endif

#include "stm-arm-neon-ref.h"
void exec_vldX_lane (void)
{
  /* In this case, input variables are arrays of vectors.  */
#define DECL_VLDX_LANE(T1, W, N, X) \
  VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(vector, T1, W, N, X); \
  VECT_ARRAY_TYPE(T1, W, N, X) VECT_ARRAY_VAR(vector_src, T1, W, N, X); \
  VECT_VAR_DECL(result_bis_##X, T1, W, N)[X * N]
  /* We need to use a temporary result buffer (result_bis) because the
     one used for other tests is not large enough.  A subset of the
     result data is moved from result_bis to result, and it is this
     subset which is used to check the actual behaviour.  The
     TEST_EXTRA_CHUNK macro below moves another chunk of data from
     result_bis to result.  */
#define TEST_VLDX_LANE(Q, T1, T2, W, N, X, L) \
  memset (VECT_VAR(buffer_src, T1, W, N), 0xAA, \
          sizeof(VECT_VAR(buffer_src, T1, W, N))); \
  \
  VECT_ARRAY_VAR(vector_src, T1, W, N, X) = \
    vld##X##Q##_##T2##W(VECT_VAR(buffer_src, T1, W, N)); \
  \
  VECT_ARRAY_VAR(vector, T1, W, N, X) = \
    /* Use dedicated init buffer, of size X.  */ \
    vld##X##Q##_lane_##T2##W(VECT_VAR(buffer_vld##X##_lane, T1, W, X), \
                             VECT_ARRAY_VAR(vector_src, T1, W, N, X), \
                             L); \
  vst##X##Q##_##T2##W(VECT_VAR(result_bis_##X, T1, W, N), \
                      VECT_ARRAY_VAR(vector, T1, W, N, X)); \
  memcpy(VECT_VAR(result, T1, W, N), VECT_VAR(result_bis_##X, T1, W, N), \
         sizeof(VECT_VAR(result, T1, W, N)))

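  /* As a hedged illustration (assuming the usual stm-arm-neon-ref.h
     naming scheme, where VECT_VAR(name, int, 16, 4) expands to
     name_int16x4), TEST_VLDX_LANE(, int, s, 16, 4, 2, 2) expands
     roughly to:

       memset (buffer_src_int16x4, 0xAA, sizeof(buffer_src_int16x4));
       vector_src_int16x4x2 = vld2_s16(buffer_src_int16x4);
       vector_int16x4x2 = vld2_lane_s16(buffer_vld2_lane_int16x2,
                                        vector_src_int16x4x2, 2);
       vst2_s16(result_bis_2_int16x4, vector_int16x4x2);
       memcpy(result_int16x4, result_bis_2_int16x4, sizeof(result_int16x4));

     That is, lane 2 of each of the two vectors is loaded from the
     dedicated init buffer, the other lanes are copied from vector_src,
     and the resulting pair is stored back (interleaved) for checking.  */
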
  /* Overwrite "result" with chunk Y of "result_bis", i.e. elements
     [Y*N .. Y*N+N-1].  */
#define TEST_EXTRA_CHUNK(T1, W, N, X, Y) \
  memcpy(VECT_VAR(result, T1, W, N), \
         &(VECT_VAR(result_bis_##X, T1, W, N)[Y*N]), \
         sizeof(VECT_VAR(result, T1, W, N)));

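  /* For example (hypothetical expansion, same naming assumption as
     above), TEST_EXTRA_CHUNK(int, 16, 4, 2, 1) copies the second group
     of 4 elements into the checked buffer:

       memcpy(result_int16x4, &(result_bis_2_int16x4[1*4]),
              sizeof(result_int16x4));  */
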
  /* With ARM RVCT, we need to declare variables before any executable
     statement.  */

  /* We cover all vector widths, but there are no 64-bit element
     variants of vldX_lane, so they are omitted here.  */
#define DECL_ALL_VLDX_LANE(X) \
  DECL_VLDX_LANE(int, 8, 8, X); \
  DECL_VLDX_LANE(int, 16, 4, X); \
  DECL_VLDX_LANE(int, 32, 2, X); \
  DECL_VLDX_LANE(uint, 8, 8, X); \
  DECL_VLDX_LANE(uint, 16, 4, X); \
  DECL_VLDX_LANE(uint, 32, 2, X); \
  DECL_VLDX_LANE(poly, 8, 8, X); \
  DECL_VLDX_LANE(poly, 16, 4, X); \
  DECL_VLDX_LANE(int, 16, 8, X); \
  DECL_VLDX_LANE(int, 32, 4, X); \
  DECL_VLDX_LANE(uint, 16, 8, X); \
  DECL_VLDX_LANE(uint, 32, 4, X); \
  DECL_VLDX_LANE(poly, 16, 8, X); \
  DECL_VLDX_LANE(float, 32, 2, X); \
  DECL_VLDX_LANE(float, 32, 4, X)

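  /* As a hedged illustration (same naming assumption as above),
     DECL_VLDX_LANE(int, 16, 4, 2) declares roughly:

       int16x4x2_t vector_int16x4x2;
       int16x4x2_t vector_src_int16x4x2;
       int16_t result_bis_2_int16x4[2 * 4];  */
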
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
#define DECL_ALL_VLDX_LANE_FP16(X) \
  DECL_VLDX_LANE(float, 16, 4, X); \
  DECL_VLDX_LANE(float, 16, 8, X)
#endif

  /* Add some padding to try to catch out-of-bounds accesses.  Use an
     array rather than a plain char to comply with RVCT constraints.  */
#define ARRAY1(V, T, W, N) VECT_VAR_DECL(V,T,W,N)[1]={42}
#define DUMMY_ARRAY(V, T, W, N, L) \
  VECT_VAR_DECL(V,T,W,N)[N*L]={0}; \
  ARRAY1(V##_pad,T,W,N)

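  /* For example (hypothetical expansion, same naming assumption),
     DUMMY_ARRAY(buffer_src, int, 8, 8, 4) declares the input buffer
     plus a guard element placed right after it:

       int8_t buffer_src_int8x8[8*4] = {0};
       int8_t buffer_src_pad_int8x8[1] = {42};  */
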
  /* Use the same lanes regardless of the size of the array (X), for
     simplicity.  */
#define TEST_ALL_VLDX_LANE(X) \
  TEST_VLDX_LANE(, int, s, 8, 8, X, 7); \
  TEST_VLDX_LANE(, int, s, 16, 4, X, 2); \
  TEST_VLDX_LANE(, int, s, 32, 2, X, 0); \
  TEST_VLDX_LANE(, uint, u, 8, 8, X, 4); \
  TEST_VLDX_LANE(, uint, u, 16, 4, X, 3); \
  TEST_VLDX_LANE(, uint, u, 32, 2, X, 1); \
  TEST_VLDX_LANE(, poly, p, 8, 8, X, 4); \
  TEST_VLDX_LANE(, poly, p, 16, 4, X, 3); \
  TEST_VLDX_LANE(q, int, s, 16, 8, X, 6); \
  TEST_VLDX_LANE(q, int, s, 32, 4, X, 2); \
  TEST_VLDX_LANE(q, uint, u, 16, 8, X, 5); \
  TEST_VLDX_LANE(q, uint, u, 32, 4, X, 0); \
  TEST_VLDX_LANE(q, poly, p, 16, 8, X, 5); \
  TEST_VLDX_LANE(, float, f, 32, 2, X, 0); \
  TEST_VLDX_LANE(q, float, f, 32, 4, X, 2)

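  /* Note that each lane index L above is chosen to be valid for the
     corresponding vector width (L < N), and that there are no q-form
     entries for 8-bit elements: ARMv7 NEON provides no vldXq_lane
     variants for them.  */
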
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
#define TEST_ALL_VLDX_LANE_FP16(X) \
  TEST_VLDX_LANE(, float, f, 16, 4, X, 0); \
  TEST_VLDX_LANE(q, float, f, 16, 8, X, 2)
#endif

#define TEST_ALL_EXTRA_CHUNKS(X, Y) \
  TEST_EXTRA_CHUNK(int, 8, 8, X, Y); \
  TEST_EXTRA_CHUNK(int, 16, 4, X, Y); \
  TEST_EXTRA_CHUNK(int, 32, 2, X, Y); \
  TEST_EXTRA_CHUNK(uint, 8, 8, X, Y); \
  TEST_EXTRA_CHUNK(uint, 16, 4, X, Y); \
  TEST_EXTRA_CHUNK(uint, 32, 2, X, Y); \
  TEST_EXTRA_CHUNK(poly, 8, 8, X, Y); \
  TEST_EXTRA_CHUNK(poly, 16, 4, X, Y); \
  TEST_EXTRA_CHUNK(int, 16, 8, X, Y); \
  TEST_EXTRA_CHUNK(int, 32, 4, X, Y); \
  TEST_EXTRA_CHUNK(uint, 16, 8, X, Y); \
  TEST_EXTRA_CHUNK(uint, 32, 4, X, Y); \
  TEST_EXTRA_CHUNK(poly, 16, 8, X, Y); \
  TEST_EXTRA_CHUNK(float, 32, 2, X, Y); \
  TEST_EXTRA_CHUNK(float, 32, 4, X, Y)

#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
#define TEST_ALL_EXTRA_CHUNKS_FP16(X, Y) \
  TEST_EXTRA_CHUNK(float, 16, 4, X, Y); \
  TEST_EXTRA_CHUNK(float, 16, 8, X, Y)
#endif

  /* Declare the temporary buffers / variables.  */
  DECL_ALL_VLDX_LANE(2);
  DECL_ALL_VLDX_LANE(3);
  DECL_ALL_VLDX_LANE(4);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  DECL_ALL_VLDX_LANE_FP16(2);
  DECL_ALL_VLDX_LANE_FP16(3);
  DECL_ALL_VLDX_LANE_FP16(4);
#endif

  /* Define dummy input arrays, large enough for x4 vectors.  */
  DUMMY_ARRAY(buffer_src, int, 8, 8, 4);
  DUMMY_ARRAY(buffer_src, int, 16, 4, 4);
  DUMMY_ARRAY(buffer_src, int, 32, 2, 4);
  DUMMY_ARRAY(buffer_src, uint, 8, 8, 4);
  DUMMY_ARRAY(buffer_src, uint, 16, 4, 4);
  DUMMY_ARRAY(buffer_src, uint, 32, 2, 4);
  DUMMY_ARRAY(buffer_src, poly, 8, 8, 4);
  DUMMY_ARRAY(buffer_src, poly, 16, 4, 4);
  DUMMY_ARRAY(buffer_src, int, 16, 8, 4);
  DUMMY_ARRAY(buffer_src, int, 32, 4, 4);
  DUMMY_ARRAY(buffer_src, uint, 16, 8, 4);
  DUMMY_ARRAY(buffer_src, uint, 32, 4, 4);
  DUMMY_ARRAY(buffer_src, poly, 16, 8, 4);
  DUMMY_ARRAY(buffer_src, float, 32, 2, 4);
  DUMMY_ARRAY(buffer_src, float, 32, 4, 4);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  DUMMY_ARRAY(buffer_src, float, 16, 4, 4);
  DUMMY_ARRAY(buffer_src, float, 16, 8, 4);
#endif

  /* Check vld2_lane/vld2q_lane.  */
  clean_results ();
#define TEST_MSG "VLD2_LANE/VLD2Q_LANE"
  TEST_ALL_VLDX_LANE(2);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_VLDX_LANE_FP16(2);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 0");
  TEST_ALL_EXTRA_CHUNKS(2, 1);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_EXTRA_CHUNKS_FP16(2, 1);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 1");

  /* Check vld3_lane/vld3q_lane.  */
  clean_results ();
#undef TEST_MSG
#define TEST_MSG "VLD3_LANE/VLD3Q_LANE"
  TEST_ALL_VLDX_LANE(3);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_VLDX_LANE_FP16(3);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 0");
  TEST_ALL_EXTRA_CHUNKS(3, 1);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_EXTRA_CHUNKS_FP16(3, 1);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 1");
  TEST_ALL_EXTRA_CHUNKS(3, 2);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_EXTRA_CHUNKS_FP16(3, 2);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 2");

  /* Check vld4_lane/vld4q_lane.  */
  clean_results ();
#undef TEST_MSG
#define TEST_MSG "VLD4_LANE/VLD4Q_LANE"
  TEST_ALL_VLDX_LANE(4);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_VLDX_LANE_FP16(4);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 0");
  TEST_ALL_EXTRA_CHUNKS(4, 1);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_EXTRA_CHUNKS_FP16(4, 1);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 1");
  TEST_ALL_EXTRA_CHUNKS(4, 2);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_EXTRA_CHUNKS_FP16(4, 2);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 2");
  TEST_ALL_EXTRA_CHUNKS(4, 3);
#if defined(__ARM_FP16_FORMAT_IEEE) && ( ((__ARM_FP & 0x2) != 0) || ((__ARM_NEON_FP16_INTRINSICS & 1) != 0) )
  TEST_ALL_EXTRA_CHUNKS_FP16(4, 3);
#endif
  dump_results_hex2 (TEST_MSG, " chunk 3");
}