1 /*
2 * Copyright © 2018, VideoLAN and dav1d authors
3 * Copyright © 2018, Two Orioles, LLC
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 *
9 * 1. Redistributions of source code must retain the above copyright notice, this
10 * list of conditions and the following disclaimer.
11 *
12 * 2. Redistributions in binary form must reproduce the above copyright notice,
13 * this list of conditions and the following disclaimer in the documentation
14 * and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
18 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
19 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
22 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
23 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
25 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 */
27
28 #ifndef DAV1D_TESTS_CHECKASM_CHECKASM_H
29 #define DAV1D_TESTS_CHECKASM_CHECKASM_H
30
31 #include "config.h"
32
33 #include <stdint.h>
34 #include <stdlib.h>
35
#ifdef _WIN32
#include <windows.h>
#if ARCH_X86_32
/* Plain setjmp/longjmp is usable on x86_32, which doesn't use SEH unwinding */
#include <setjmp.h>
typedef jmp_buf checkasm_context;
#define checkasm_save_context() setjmp(checkasm_context_buf)
#define checkasm_load_context() longjmp(checkasm_context_buf, 1)
#elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
/* setjmp/longjmp on Windows on architectures using SEH (all except x86_32)
 * will try to use SEH to unwind the stack, which doesn't work for assembly
 * functions without unwind information. */
typedef struct { CONTEXT c; int status; } checkasm_context;
/* Evaluates to 0 when the context is first captured, and to the stored
 * status (1) when execution resumes here via checkasm_load_context() */
#define checkasm_save_context() \
    (checkasm_context_buf.status = 0, \
     RtlCaptureContext(&checkasm_context_buf.c), \
     checkasm_context_buf.status)
#define checkasm_load_context() \
    (checkasm_context_buf.status = 1, \
     RtlRestoreContext(&checkasm_context_buf.c, NULL))
#else
/* No usable context save/restore mechanism on this target; stub it out */
typedef void* checkasm_context;
#define checkasm_save_context() 0
#define checkasm_load_context() do {} while (0)
#endif
#else
/* POSIX: sigsetjmp(..., 1) so the signal mask is saved and restored too */
#include <setjmp.h>
typedef sigjmp_buf checkasm_context;
#define checkasm_save_context() sigsetjmp(checkasm_context_buf, 1)
#define checkasm_load_context() siglongjmp(checkasm_context_buf, 1)
#endif
66
67 #include "include/common/attributes.h"
68 #include "include/common/bitdepth.h"
69 #include "include/common/intops.h"
70
/* Pseudo-random number generator used to produce reproducible test data */
int xor128_rand(void);
#define rnd xor128_rand

/* Declare both the 8 bpc and 16 bpc variants of a bitdepth-templated
 * test entry point */
#define decl_check_bitfns(name) \
name##_8bpc(void); \
name##_16bpc(void)

/* Entry points for the individual module test suites */
void checkasm_check_msac(void);
void checkasm_check_pal(void);
void checkasm_check_refmvs(void);
decl_check_bitfns(void checkasm_check_cdef);
decl_check_bitfns(void checkasm_check_filmgrain);
decl_check_bitfns(void checkasm_check_ipred);
decl_check_bitfns(void checkasm_check_itx);
decl_check_bitfns(void checkasm_check_loopfilter);
decl_check_bitfns(void checkasm_check_looprestoration);
decl_check_bitfns(void checkasm_check_mc);

/* Core checkasm framework hooks used by the macros below */
void *checkasm_check_func(void *func, const char *name, ...);
int checkasm_bench_func(void);
int checkasm_fail_func(const char *msg, ...);
void checkasm_update_bench(int iterations, uint64_t cycles);
void checkasm_report(const char *name, ...);
void checkasm_set_signal_handler_state(int enabled);
void checkasm_handle_signal(void);
/* Shared jump context used by checkasm_save_context()/checkasm_load_context() */
extern checkasm_context checkasm_context_buf;
97
/* float compare utilities */
/* Near-equality within max_ulp units in the last place */
int float_near_ulp(float a, float b, unsigned max_ulp);
/* Near-equality within an absolute epsilon */
int float_near_abs_eps(float a, float b, float eps);
/* Combined epsilon/ULP criterion */
int float_near_abs_eps_ulp(float a, float b, float eps, unsigned max_ulp);
/* Array variants of the comparisons above, over len elements */
int float_near_ulp_array(const float *a, const float *b, unsigned max_ulp,
                         int len);
int float_near_abs_eps_array(const float *a, const float *b, float eps,
                             int len);
int float_near_abs_eps_array_ulp(const float *a, const float *b, float eps,
                                 unsigned max_ulp, int len);
108
#define BENCH_RUNS (1 << 12) /* Trade-off between accuracy and speed */

/* Decide whether or not the specified function needs to be tested;
 * stores the new implementation in func_new and the reference in func_ref */
#define check_func(func, ...)\
    (func_ref = checkasm_check_func((func_new = func), __VA_ARGS__))

/* Declare the function prototype. The first argument is the return value,
 * the remaining arguments are the function parameters. Naming parameters
 * is optional. */
#define declare_func(ret, ...)\
    declare_new(ret, __VA_ARGS__)\
    void *func_ref, *func_new;\
    typedef ret func_type(__VA_ARGS__);\
    if (checkasm_save_context()) checkasm_handle_signal()

/* Indicate that the current test has failed */
#define fail() checkasm_fail_func("%s:%d", __FILE__, __LINE__)

/* Print the test outcome */
#define report checkasm_report

/* Call the reference function, with the signal handler enabled around the
 * call so that a crash inside it is caught and reported as a failure */
#define call_ref(...)\
    (checkasm_set_signal_handler_state(1),\
     ((func_type *)func_ref)(__VA_ARGS__));\
    checkasm_set_signal_handler_state(0)
135
#if HAVE_ASM
#if ARCH_X86
#if defined(_MSC_VER) && !defined(__clang__)
#include <intrin.h>
/* Serializing fence followed by a time-stamp counter read (MSVC intrinsics) */
#define readtime() (_mm_lfence(), __rdtsc())
#else
/* Sample the x86 time-stamp counter. The lfence ensures preceding
 * instructions have completed before rdtsc executes. */
static inline uint64_t readtime(void) {
    uint32_t lo, hi;
    __asm__ __volatile__("lfence\nrdtsc" : "=a"(lo), "=d"(hi));
    return ((uint64_t)hi << 32) | lo;
}
#define readtime readtime
#endif
#elif CONFIG_MACOS_KPERF
/* Cycle count provided by a separate kperf-based helper */
uint64_t checkasm_kperf_cycles(void);
#define readtime() checkasm_kperf_cycles()
#elif (ARCH_AARCH64 || ARCH_ARM) && defined(__APPLE__)
#include <mach/mach_time.h>
#define readtime() mach_absolute_time()
#elif ARCH_AARCH64
#ifdef _MSC_VER
#include <windows.h>
/* Instruction barrier followed by a counter read (MSVC intrinsics) */
#define readtime() (_InstructionSynchronizationBarrier(), ReadTimeStampCounter())
#else
static inline uint64_t readtime(void) {
    uint64_t cycle_counter;
    /* This requires enabling user mode access to the cycle counter (which
     * can only be done from kernel space).
     * This could also read cntvct_el0 instead of pmccntr_el0; that register
     * might also be readable (depending on kernel version), but it has much
     * worse precision (it's a fixed 50 MHz timer). */
    __asm__ __volatile__("isb\nmrs %0, pmccntr_el0"
                         : "=r"(cycle_counter)
                         :: "memory");
    return cycle_counter;
}
#define readtime readtime
#endif
#elif ARCH_ARM && !defined(_MSC_VER) && __ARM_ARCH >= 7
static inline uint64_t readtime(void) {
    uint32_t cycle_counter;
    /* This requires enabling user mode access to the cycle counter (which
     * can only be done from kernel space). */
    __asm__ __volatile__("isb\nmrc p15, 0, %0, c9, c13, 0"
                         : "=r"(cycle_counter)
                         :: "memory");
    return cycle_counter;
}
#define readtime readtime
#elif ARCH_PPC64LE
static inline uint64_t readtime(void) {
    uint32_t tbu, tbl, temp;

    /* Read the 64-bit timebase as two 32-bit halves (SPRs 268/269),
     * retrying if the upper half changed between the reads (i.e. the
     * lower half wrapped during the sequence). */
    __asm__ __volatile__(
        "1:\n"
        "mfspr %2,269\n"
        "mfspr %0,268\n"
        "mfspr %1,269\n"
        "cmpw %2,%1\n"
        "bne 1b\n"
        : "=r"(tbl), "=r"(tbu), "=r"(temp)
        :
        : "cc");

    return (((uint64_t)tbu) << 32) | (uint64_t)tbl;
}
#define readtime readtime
#elif ARCH_RISCV
#include <time.h>
/* Monotonic wall-clock fallback: current CLOCK_MONOTONIC_RAW time,
 * converted to a single nanosecond count. */
static inline uint64_t clock_gettime_nsec(void) {
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC_RAW, &now);
    return (uint64_t)now.tv_sec * 1000000000u + (uint64_t)now.tv_nsec;
}
#define readtime clock_gettime_nsec
#elif ARCH_LOONGARCH
static inline uint64_t readtime(void) {
#if ARCH_LOONGARCH64
    /* rdtime.d reads the full 64-bit counter (second output is the id) */
    uint64_t a, id;
    __asm__ __volatile__("rdtime.d %0, %1"
                         : "=r"(a), "=r"(id)
                         :: );
    return a;
#else
    /* 32-bit: only the low word of the counter is read */
    uint32_t a, id;
    __asm__ __volatile__("rdtimel.w %0, %1"
                         : "=r"(a), "=r"(id)
                         :: );
    return (uint64_t)a;
#endif
}
#define readtime readtime
#endif
229
/* Verifies that clobbered callee-saved registers
 * are properly saved and restored */
void checkasm_checked_call(void *func, ...);

#if ARCH_X86_64
/* YMM and ZMM registers on x86 are turned off to save power when they haven't
 * been used for some period of time. When they are used there will be a
 * "warmup" period during which performance will be reduced and inconsistent
 * which is problematic when trying to benchmark individual functions. We can
 * work around this by periodically issuing "dummy" instructions that uses
 * those registers to keep them powered on. */
void checkasm_simd_warmup(void);

/* The upper 32 bits of 32-bit data types are undefined when passed as function
 * parameters. In practice those bits usually end up being zero which may hide
 * certain bugs, such as using a register containing undefined bits as a pointer
 * offset, so we want to intentionally clobber those bits with junk to expose
 * any issues. The following set of macros automatically calculates a bitmask
 * specifying which parameters should have their upper halves clobbered. */
#ifdef _WIN32
/* Integer and floating-point parameters share "register slots". */
#define IGNORED_FP_ARGS 0
#else
/* Up to 8 floating-point parameters are passed in XMM registers, which are
 * handled orthogonally from integer parameters passed in GPR registers. */
#define IGNORED_FP_ARGS 8
#endif
#ifdef HAVE_C11_GENERIC
/* For each parameter type, set a bit in clobber_mask when the parameter is a
 * 32-bit integer; float/double parameters only advance the position counter
 * once the IGNORED_FP_ARGS limit is exceeded (see above). */
#define clobber_type(arg) _Generic((void (*)(void*, arg))NULL,\
    void (*)(void*, int32_t ): clobber_mask |= 1 << mpos++,\
    void (*)(void*, uint32_t): clobber_mask |= 1 << mpos++,\
    void (*)(void*, float   ): mpos += (fp_args++ >= IGNORED_FP_ARGS),\
    void (*)(void*, double  ): mpos += (fp_args++ >= IGNORED_FP_ARGS),\
    default: mpos++)
/* Build the clobber mask over the first 16 parameter slots */
#define init_clobber_mask(a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, ...)\
    unsigned clobber_mask = 0;\
    {\
        int mpos = 0, fp_args = 0;\
        clobber_type(a); clobber_type(b); clobber_type(c); clobber_type(d);\
        clobber_type(e); clobber_type(f); clobber_type(g); clobber_type(h);\
        clobber_type(i); clobber_type(j); clobber_type(k); clobber_type(l);\
        clobber_type(m); clobber_type(n); clobber_type(o); clobber_type(p);\
    }
#else
/* Skip parameter clobbering on compilers without support for _Generic() */
#define init_clobber_mask(...) unsigned clobber_mask = 0
#endif
/* Route calls through checkasm_checked_call with 16 extra integer arguments
 * plus the function pointer and the clobber mask appended */
#define declare_new(ret, ...)\
    ret (*checked_call)(__VA_ARGS__, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int, int, int,\
                        void*, unsigned) =\
        (void*)checkasm_checked_call;\
    init_clobber_mask(__VA_ARGS__, void*, void*, void*, void*,\
                      void*, void*, void*, void*, void*, void*,\
                      void*, void*, void*, void*, void*);
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checkasm_simd_warmup(),\
     checked_call(__VA_ARGS__, 16, 15, 14, 13, 12, 11, 10, 9, 8,\
                  7, 6, 5, 4, 3, 2, 1, func_new, clobber_mask));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_X86_32
/* x86_32: the checked-call wrapper takes the function pointer first,
 * followed by the real arguments and 15 extra integer arguments */
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, __VA_ARGS__, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call;
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checked_call(func_new, __VA_ARGS__, 15, 14, 13, 12,\
                  11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_ARM
/* Use a dummy argument, to offset the real parameters by 2, not only 1.
 * This makes sure that potential 8-byte-alignment of parameters is kept
 * the same even when the extra parameters have been removed. */
void checkasm_checked_call_vfp(void *func, int dummy, ...);
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, int dummy, __VA_ARGS__,\
                        int, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call_vfp;
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checked_call(func_new, 0, __VA_ARGS__, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_AARCH64 && !defined(__APPLE__)
/* Fill a region of the stack with a junk pattern before the call so that
 * reads of uninitialized stack memory are more likely to be detected */
void checkasm_stack_clobber(uint64_t clobber, ...);
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, int, int, int, int, int, int, int,\
                        __VA_ARGS__, int, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call;
#define CLOB (UINT64_C(0xdeadbeefdeadbeef))
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checkasm_stack_clobber(CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
                            CLOB, CLOB, CLOB, CLOB, CLOB),\
     checked_call(func_new, 0, 0, 0, 0, 0, 0, 0, __VA_ARGS__,\
                  7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0));\
    checkasm_set_signal_handler_state(0)
#elif ARCH_RISCV
#define declare_new(ret, ...)\
    ret (*checked_call)(void *, int, int, int, int, int, int, int,\
                        __VA_ARGS__, int, int, int, int, int, int, int, int,\
                        int, int, int, int, int, int, int) =\
        (void *)checkasm_checked_call;
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     checked_call(func_new, 0, 0, 0, 0, 0, 0, 0, __VA_ARGS__,\
                  7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0));\
    checkasm_set_signal_handler_state(0)
#else
/* Architectures without a checked-call wrapper: call directly */
#define declare_new(ret, ...)
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     ((func_type *)func_new)(__VA_ARGS__));\
    checkasm_set_signal_handler_state(0)
#endif
#else /* HAVE_ASM */
#define declare_new(ret, ...)
/* Call the function */
#define call_new(...)\
    (checkasm_set_signal_handler_state(1),\
     ((func_type *)func_new)(__VA_ARGS__));\
    checkasm_set_signal_handler_state(0)
#endif /* HAVE_ASM */
358
/* Benchmark the function */
#ifdef readtime
/* Times BENCH_RUNS batches of four calls each; batches whose time is a
 * clear outlier (more than 4x the running average) are discarded. The
 * talt variable alternates per call for use with alternate() below. */
#define bench_new(...)\
    do {\
        if (checkasm_bench_func()) {\
            func_type *const tfunc = func_new;\
            checkasm_set_signal_handler_state(1);\
            uint64_t tsum = 0;\
            int tcount = 0;\
            for (int ti = 0; ti < BENCH_RUNS; ti++) {\
                uint64_t t = readtime();\
                int talt = 0; (void)talt;\
                tfunc(__VA_ARGS__);\
                talt = 1;\
                tfunc(__VA_ARGS__);\
                talt = 0;\
                tfunc(__VA_ARGS__);\
                talt = 1;\
                tfunc(__VA_ARGS__);\
                t = readtime() - t;\
                if (t*tcount <= tsum*4 && ti > 0) {\
                    tsum += t;\
                    tcount++;\
                }\
            }\
            checkasm_set_signal_handler_state(0);\
            checkasm_update_bench(tcount, tsum);\
        } else {\
            const int talt = 0; (void)talt;\
            call_new(__VA_ARGS__);\
        }\
    } while (0)
#else
/* No timer available on this platform; benchmarking is a no-op */
#define bench_new(...) do {} while (0)
#endif
394
/* Alternates between two pointers. Intended to be used within bench_new()
 * calls for functions which modifies their input buffer(s) to ensure that
 * throughput, and not latency, is measured. */
#define alternate(a, b) (talt ? (b) : (a))

#define ROUND_UP(x,a) (((x)+((a)-1)) & ~((a)-1))
/* Declare a 64-byte-aligned pixel buffer plus its stride; `name` points
 * 16 rows plus 64 pixels into the allocation, leaving padding around the
 * usable w x h area. */
#define PIXEL_RECT(name, w, h) \
    ALIGN_STK_64(pixel, name##_buf, ((h)+32)*(ROUND_UP(w,64)+64) + 64,); \
    ptrdiff_t name##_stride = sizeof(pixel)*(ROUND_UP(w,64)+64); \
    (void)name##_stride; \
    pixel *name = name##_buf + (ROUND_UP(w,64)+64)*16 + 64

/* Fill the whole buffer, padding included, with a recognizable junk value */
#define CLEAR_PIXEL_RECT(name) \
    memset(name##_buf, 0x99, sizeof(name##_buf)) \
409
/* Prototype for the typed 2-D buffer comparison helpers: compare the
 * w x h region of buf1/buf2 (with the given strides), optionally also
 * checking the surrounding padding; nonzero return indicates a mismatch */
#define DECL_CHECKASM_CHECK_FUNC(type) \
int checkasm_check_##type(const char *const file, const int line, \
                          const type *const buf1, const ptrdiff_t stride1, \
                          const type *const buf2, const ptrdiff_t stride2, \
                          const int w, const int h, const char *const name, \
                          const int align_w, const int align_h, \
                          const int padding)

DECL_CHECKASM_CHECK_FUNC(int8_t);
DECL_CHECKASM_CHECK_FUNC(int16_t);
DECL_CHECKASM_CHECK_FUNC(int32_t);
DECL_CHECKASM_CHECK_FUNC(uint8_t);
DECL_CHECKASM_CHECK_FUNC(uint16_t);
DECL_CHECKASM_CHECK_FUNC(uint32_t);

#define CONCAT(a,b) a ## b

/* checkasm_check(type, ...) expands to checkasm_check_<type>(__FILE__,
 * __LINE__, ...) with align_w, align_h and padding defaulting to 0 */
#define checkasm_check2(prefix, ...) CONCAT(checkasm_check_, prefix)(__FILE__, __LINE__, __VA_ARGS__)
#define checkasm_check(prefix, ...) checkasm_check2(prefix, __VA_ARGS__, 0, 0, 0)

#ifdef BITDEPTH
/* Bitdepth-templated convenience wrappers (PIXEL_TYPE/COEF_TYPE come
 * from the bitdepth template headers) */
#define checkasm_check_pixel(...) checkasm_check(PIXEL_TYPE, __VA_ARGS__)
#define checkasm_check_pixel_padded(...) checkasm_check2(PIXEL_TYPE, __VA_ARGS__, 1, 1, 8)
#define checkasm_check_pixel_padded_align(...) checkasm_check2(PIXEL_TYPE, __VA_ARGS__, 8)
#define checkasm_check_coef(...) checkasm_check(COEF_TYPE, __VA_ARGS__)
#endif
436
437 #endif /* DAV1D_TESTS_CHECKASM_CHECKASM_H */
438