1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3 * Copyright (c) 2017-2018 Richard Palethorpe <rpalethorpe@suse.com>
4 */
5 /**
6 * @file tst_fuzzy_sync.h
7 * Fuzzy Synchronisation - abbreviated to fzsync
8 *
9 * This library is intended to help reproduce race conditions by synchronising
10 * two threads at a given place by marking the range a race may occur
11 * in. Because the exact place where any race occurs is within the kernel,
12 * and therefore impossible to mark accurately, the library may add randomised
13 * delays to either thread in order to help find the exact race timing.
14 *
15 * Currently only two way races are explicitly supported, that is races
16 * involving two threads or processes. We refer to the main test thread as
17 * thread A and the child thread as thread B.
18 *
19 * In each thread you need a simple while- or for-loop which the tst_fzsync_*
20 * functions are called in. In the simplest case thread A will look something
21 * like:
22 *
23 * tst_fzsync_pair_reset(&pair, run_thread_b);
24 * while (tst_fzsync_run_a(&pair)) {
25 * // Perform some setup which must happen before the race
26 * tst_fzsync_start_race_a(&pair);
27 * // Do some dodgy syscall
28 * tst_fzsync_end_race_a(&pair);
29 * }
30 *
31 * Then in thread B (run_thread_b):
32 *
33 * while (tst_fzsync_run_b(&pair)) {
34 * tst_fzsync_start_race_b(&pair);
35 * // Do something which can race with the dodgy syscall in A
36 * tst_fzsync_end_race_b(&pair)
37 * }
38 *
39 * The calls to tst_fzsync_start/end_race and tst_fzsync_run_a/b block (at
 * least) until both threads have entered them. These functions can only be
41 * called once for each iteration, but further synchronisation points can be
42 * added by calling tst_fzsync_wait_a() and tst_fzsync_wait_b() in each
43 * thread.
44 *
45 * The execution of the loops in threads A and B are bounded by both iteration
46 * count and time. A slow machine is likely to be limited by time and a fast
47 * one by iteration count. The user can use the -i parameter to run the test
48 * multiple times or LTP_TIMEOUT_MUL to give the test more time.
49 *
50 * It is possible to use the library just for tst_fzsync_pair_wait() to get a
51 * basic spin wait. However if you are actually testing a race condition then
52 * it is recommended to use tst_fzsync_start_race_a/b even if the
53 * randomisation is not needed. It provides some semantic information which
54 * may be useful in the future.
55 *
56 * For a usage example see testcases/cve/cve-2016-7117.c or just run
57 * 'git grep tst_fuzzy_sync.h'
58 *
59 * @sa tst_fzsync_pair
60 */
61
#include <sys/time.h>
#include <time.h>
#include <math.h>
#include <stdlib.h>
#include <limits.h>
#include <unistd.h>
#include <pthread.h>
#include "tst_atomic.h"
#include "tst_timer.h"
#include "tst_safe_pthread.h"
70
71 #ifndef TST_FUZZY_SYNC_H__
72 #define TST_FUZZY_SYNC_H__
73
74 /* how much of exec time is sampling allowed to take */
75 #define SAMPLING_SLICE 0.5f
76
/** Some statistics for a variable */
struct tst_fzsync_stat {
	/** Exponential moving average of the sampled values */
	float avg;
	/** Moving average of each sample's absolute deviation from avg */
	float avg_dev;
	/** |avg_dev / avg|; used to decide whether the samples have settled */
	float dev_ratio;
};
83
/**
 * The state of a two way synchronisation or race.
 *
 * This contains all the necessary state for approximately synchronising two
 * sections of code in different threads.
 *
 * Some of the fields can be configured before calling
 * tst_fzsync_pair_reset(), however this is mainly for debugging purposes. If
 * a test requires one of the parameters to be modified, we should consider
 * finding a way of automatically selecting an appropriate value at runtime.
 *
 * Internal fields should only be accessed by library functions.
 */
struct tst_fzsync_pair {
	/**
	 * The rate at which old diff samples are forgotten
	 *
	 * Defaults to 0.25.
	 */
	float avg_alpha;
	/** Internal; Thread A start time */
	struct timespec a_start;
	/** Internal; Thread B start time */
	struct timespec b_start;
	/** Internal; Thread A end time */
	struct timespec a_end;
	/** Internal; Thread B end time */
	struct timespec b_end;
	/** Internal; Avg. difference between a_start and b_start */
	struct tst_fzsync_stat diff_ss;
	/** Internal; Avg. difference between a_start and a_end */
	struct tst_fzsync_stat diff_sa;
	/** Internal; Avg. difference between b_start and b_end */
	struct tst_fzsync_stat diff_sb;
	/** Internal; Avg. difference between a_end and b_end */
	struct tst_fzsync_stat diff_ab;
	/** Internal; Number of spins while waiting for the slower thread */
	int spins;
	/** Internal; Average and deviation of the spin counts per iteration */
	struct tst_fzsync_stat spins_avg;
	/**
	 * Internal; Number of spins to use in the delay.
	 *
	 * A negative value delays thread A and a positive delays thread B.
	 */
	int delay;
	/**
	 * Internal; Fixed offset added to the delay on every iteration.
	 *
	 * Adjusted with tst_fzsync_pair_add_bias(); as with delay, positive
	 * values delay thread B and negative values delay thread A.
	 */
	int delay_bias;
	/**
	 * Internal; The number of samples left or the sampling state.
	 *
	 * A positive value is the number of remaining mandatory
	 * samples. Zero or a negative value indicates some other state.
	 */
	int sampling;
	/**
	 * The Minimum number of statistical samples which must be collected.
	 *
	 * The minimum number of iterations which must be performed before a
	 * random delay can be calculated. Defaults to 1024.
	 */
	int min_samples;
	/**
	 * The maximum allowed proportional average deviation.
	 *
	 * A value in the range (0, 1) which gives the maximum average
	 * deviation which must be attained before random delays can be
	 * calculated.
	 *
	 * It is a ratio of (average_deviation / total_time). The default is
	 * 0.1, so this allows an average deviation of at most 10%.
	 */
	float max_dev_ratio;

	/** Internal; Atomic counter used by fzsync_pair_wait() */
	int a_cntr;
	/** Internal; Atomic counter used by fzsync_pair_wait() */
	int b_cntr;
	/** Internal; Used by tst_fzsync_pair_exit() and fzsync_pair_wait() */
	int exit;
	/**
	 * The maximum desired execution time as a proportion of the timeout
	 *
	 * A value x so that 0 < x < 1 which decides how long the test should
	 * be run for (assuming the loop limit is not exceeded first).
	 *
	 * Defaults to 0.5 (~150 seconds with default timeout).
	 */
	float exec_time_p;
	/** Internal; The test time remaining on tst_fzsync_pair_reset() */
	float exec_time_start;
	/**
	 * The maximum number of iterations to execute during the test
	 *
	 * Defaults to a large number, but not too large.
	 */
	int exec_loops;
	/** Internal; The current loop index */
	int exec_loop;
	/** Internal; The second thread or 0 */
	pthread_t thread_b;
};
184
/*
 * Substitute the default def when param is unset (zero), then verify that the
 * resulting value lies within [low, hi], aborting the test otherwise. Only
 * usable where a local `pair` pointer is in scope; see tst_fzsync_pair_init().
 */
#define CHK(param, low, hi, def) do {					      \
	pair->param = (pair->param ? pair->param : def);		      \
	if (pair->param < low)						      \
		tst_brk(TBROK, #param " is less than the lower bound " #low); \
	if (pair->param > hi)						      \
		tst_brk(TBROK, #param " is more than the upper bound " #hi);  \
	} while (0)
/**
 * Ensures that any Fuzzy Sync parameters are properly set
 *
 * @relates tst_fzsync_pair
 *
 * Usually called from the setup function, it sets default parameter values or
 * validates any existing non-defaults. A zero field is treated as "unset" and
 * receives the default shown below.
 *
 * @sa tst_fzsync_pair_reset()
 */
static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
{
	CHK(avg_alpha, 0, 1, 0.25);
	CHK(min_samples, 20, INT_MAX, 1024);
	CHK(max_dev_ratio, 0, 1, 0.1);
	CHK(exec_time_p, 0, 1, 0.5);
	CHK(exec_loops, 20, INT_MAX, 3000000);
}
210 #undef CHK
211
/**
 * Exit and join thread B if necessary.
 *
 * @relates tst_fzsync_pair
 *
 * Call this from your cleanup function.
 */
static void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
{
	if (pair->thread_b) {
		/* Revoke thread B if parent hits accidental break */
		if (!pair->exit) {
			tst_atomic_store(1, &pair->exit);
			/*
			 * Give B a grace period to observe the exit flag and
			 * leave on its own before resorting to cancellation.
			 */
			usleep(100000);
			pthread_cancel(pair->thread_b);
		}
		SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
		/* Mark thread B as gone so cleanup is idempotent */
		pair->thread_b = 0;
	}
}
232
/** To store the run_b pointer and pass to tst_fzsync_thread_wrapper */
struct tst_fzsync_run_thread {
	/** Thread B entry point supplied to tst_fzsync_pair_reset() */
	void *(*func)(void *);
	/** Argument forwarded to func (currently always NULL) */
	void *arg;
};
238
/**
 * Wrap run_b for tst_fzsync_pair_reset to enable pthread cancel
 * at the start of the thread B.
 */
static void *tst_fzsync_thread_wrapper(void *run_thread)
{
	/* Copy the descriptor onto our stack; the caller's storage is shared */
	struct tst_fzsync_run_thread t = *(struct tst_fzsync_run_thread *)run_thread;

	/* Allow tst_fzsync_pair_cleanup() to cancel us at any point */
	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	return t.func(t.arg);
}
251
252 /**
253 * Zero some stat fields
254 *
255 * @relates tst_fzsync_stat
256 */
tst_init_stat(struct tst_fzsync_stat * s)257 static void tst_init_stat(struct tst_fzsync_stat *s)
258 {
259 s->avg = 0;
260 s->avg_dev = 0;
261 }
262
/**
 * Reset or initialise fzsync.
 *
 * @relates tst_fzsync_pair
 * @param pair The state structure initialised with TST_FZSYNC_PAIR_INIT.
 * @param run_b The function defining thread B or NULL.
 *
 * Call this from your main test function (thread A), just before entering the
 * main loop. It will (re)set any variables needed by fzsync and (re)start
 * thread B using the function provided.
 *
 * If you need to use fork or clone to start the second thread/process then
 * you can pass NULL to run_b and handle starting and stopping thread B
 * yourself. You may need to place tst_fzsync_pair in some shared memory as
 * well.
 *
 * @sa tst_fzsync_pair_init()
 */
static void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
				  void *(*run_b)(void *))
{
	/* Join any thread B left over from a previous run */
	tst_fzsync_pair_cleanup(pair);

	tst_init_stat(&pair->diff_ss);
	tst_init_stat(&pair->diff_sa);
	tst_init_stat(&pair->diff_sb);
	tst_init_stat(&pair->diff_ab);
	tst_init_stat(&pair->spins_avg);
	pair->delay = 0;
	/* Re-enter the mandatory sampling phase */
	pair->sampling = pair->min_samples;

	pair->exec_loop = 0;

	pair->a_cntr = 0;
	pair->b_cntr = 0;
	pair->exit = 0;
	if (run_b) {
		/*
		 * static: the new thread dereferences this asynchronously in
		 * tst_fzsync_thread_wrapper(), possibly after we return.
		 */
		static struct tst_fzsync_run_thread wrap_run_b;

		wrap_run_b.func = run_b;
		wrap_run_b.arg = NULL;
		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, tst_fzsync_thread_wrapper, &wrap_run_b);
	}

	/* Baseline used by tst_fzsync_run_a() to measure elapsed run time */
	pair->exec_time_start = (float)tst_timeout_remaining();
}
309
/**
 * Print stat
 *
 * @relates tst_fzsync_stat
 * @param stat The statistic to print
 * @param unit Unit suffix appended to the avg and avg_dev values
 * @param name Label printed in front of the values
 *
 * Uses positional conversion specifiers (%n$) so that unit can be printed
 * twice from a single argument.
 */
static inline void tst_fzsync_stat_info(struct tst_fzsync_stat stat,
					char *unit, char *name)
{
	tst_res(TINFO,
		"%1$-17s: { avg = %3$5.0f%2$s, avg_dev = %4$5.0f%2$s, dev_ratio = %5$.2f }",
		name, unit, stat.avg, stat.avg_dev, stat.dev_ratio);
}
322
/**
 * Print some synchronisation statistics
 *
 * @relates tst_fzsync_pair
 *
 * Prints the loop count, the current delay bias and each of the timing and
 * spin statistics.
 */
static void tst_fzsync_pair_info(struct tst_fzsync_pair *pair)
{
	tst_res(TINFO, "loop = %d, delay_bias = %d",
		pair->exec_loop, pair->delay_bias);
	tst_fzsync_stat_info(pair->diff_ss, "ns", "start_a - start_b");
	tst_fzsync_stat_info(pair->diff_sa, "ns", "end_a - start_a");
	tst_fzsync_stat_info(pair->diff_sb, "ns", "end_b - start_b");
	tst_fzsync_stat_info(pair->diff_ab, "ns", "end_a - end_b");
	tst_fzsync_stat_info(pair->spins_avg, " ", "spins");
}
338
339 /** Wraps clock_gettime */
tst_fzsync_time(struct timespec * t)340 static inline void tst_fzsync_time(struct timespec *t)
341 {
342 #ifdef CLOCK_MONOTONIC_RAW
343 clock_gettime(CLOCK_MONOTONIC_RAW, t);
344 #else
345 clock_gettime(CLOCK_MONOTONIC, t);
346 #endif
347 }
348
/**
 * Exponential moving average
 *
 * @param alpha The preference for recent samples over old ones.
 * @param sample The current sample
 * @param prev_avg The average of all the previous samples
 *
 * @return The average including the current sample.
 */
static inline float tst_exp_moving_avg(float alpha,
				       float sample,
				       float prev_avg)
{
	/* Weight of the history shrinks as alpha grows */
	const double decayed = (1.0 - alpha) * prev_avg;

	return alpha * sample + decayed;
}
364
365 /**
366 * Update a stat with a new sample
367 *
368 * @relates tst_fzsync_stat
369 */
tst_upd_stat(struct tst_fzsync_stat * s,float alpha,float sample)370 static inline void tst_upd_stat(struct tst_fzsync_stat *s,
371 float alpha,
372 float sample)
373 {
374 s->avg = tst_exp_moving_avg(alpha, sample, s->avg);
375 s->avg_dev = tst_exp_moving_avg(alpha,
376 fabs(s->avg - sample), s->avg_dev);
377 s->dev_ratio = fabs(s->avg ? s->avg_dev / s->avg : 0);
378 }
379
/**
 * Update a stat with a new diff sample
 *
 * @relates tst_fzsync_stat
 *
 * Converts the difference between the two timestamps to nanoseconds and
 * feeds it to tst_upd_stat().
 */
static inline void tst_upd_diff_stat(struct tst_fzsync_stat *s,
				     float alpha,
				     struct timespec t1,
				     struct timespec t2)
{
	tst_upd_stat(s, alpha, tst_timespec_diff_ns(t1, t2));
}
392
393 /**
394 * Calculate various statistics and the delay
395 *
396 * This function helps create the fuzz in fuzzy sync. Imagine we have the
397 * following timelines in threads A and B:
398 *
399 * start_race_a
400 * ^ end_race_a (a)
401 * | ^
402 * | |
403 * - --+------------------------+-- - -
404 * | Syscall A | Thread A
405 * - --+------------------------+-- - -
406 * - --+----------------+-------+-- - -
407 * | Syscall B | spin | Thread B
408 * - --+----------------+-------+-- - -
409 * | |
410 * ^ ^
411 * start_race_b end_race_b
412 *
413 * Here we have synchronised the calls to syscall A and B with start_race_{a,
414 * b} so that they happen at approximately the same time in threads A and
415 * B. If the race condition occurs during the entry code for these two
416 * functions then we will quickly hit it. If it occurs during the exit code of
417 * B and mid way through A, then we will quickly hit it.
418 *
419 * However if the exit paths of A and B need to be aligned and (end_race_a -
420 * end_race_b) is large relative to the variation in call times, the
421 * probability of hitting the race condition is close to zero. To solve this
422 * scenario (and others) a randomised delay is introduced before the syscalls
423 * in A and B. Given enough time the following should happen where the exit
424 * paths are now synchronised:
425 *
426 * start_race_a
427 * ^ end_race_a (a)
428 * | ^
429 * | |
430 * - --+------------------------+-- - -
431 * | Syscall A | Thread A
432 * - --+------------------------+-- - -
433 * - --+-------+----------------+-- - -
434 * | delay | Syscall B | Thread B
435 * - --+-------+----------------+-- - -
436 * | |
437 * ^ ^
438 * start_race_b end_race_b
439 *
440 * The delay is not introduced immediately and the delay range is only
441 * calculated once the average relative deviation has dropped below some
442 * percentage of the total time.
443 *
444 * The delay range is chosen so that any point in Syscall A could be
445 * synchronised with any point in Syscall B using a value from the
446 * range. Because the delay range may be too large for a linear search, we use
447 * an evenly distributed random function to pick a value from it.
448 *
449 * The delay range goes from positive to negative. A negative delay will delay
450 * thread A and a positive one will delay thread B. The range is bounded by
451 * the point where the entry code to Syscall A is synchronised with the exit
452 * to Syscall B and the entry code to Syscall B is synchronised with the exit
453 * of A.
454 *
455 * In order to calculate the lower bound (the max delay of A) we can simply
456 * negate the execution time of Syscall B and convert it to a spin count. For
457 * the upper bound (the max delay of B), we just take the execution time of A
458 * and convert it to a spin count.
459 *
460 * In order to calculate spin count we need to know approximately how long a
461 * spin takes and divide the delay time with it. We find this by first
462 * counting how many spins one thread spends waiting for the other during
463 * end_race[1]. We also know when each syscall exits so we can take the
464 * difference between the exit times and divide it with the number of spins
465 * spent waiting.
466 *
467 * All the times and counts we use in the calculation are averaged over a
468 * variable number of iterations. There is an initial sampling period where we
469 * simply collect time and count samples then calculate their averages. When a
470 * minimum number of samples have been collected, and if the average deviation
471 * is below some proportion of the average sample magnitude, then the sampling
472 * period is ended. On all further iterations a random delay is calculated and
473 * applied, but the averages are not updated.
474 *
475 * [1] This assumes there is always a significant difference. The algorithm
476 * may fail to introduce a delay (when one is needed) in situations where
477 * Syscall A and B finish at approximately the same time.
478 *
479 * @relates tst_fzsync_pair
480 */
static void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
{
	float alpha = pair->avg_alpha;
	float per_spin_time, time_delay;
	float max_dev = pair->max_dev_ratio;
	int over_max_dev;

	/* Start from the user-supplied bias; randomness may be added below */
	pair->delay = pair->delay_bias;

	/* Are any of the statistics still too noisy to trust? */
	over_max_dev = pair->diff_ss.dev_ratio > max_dev
		|| pair->diff_sa.dev_ratio > max_dev
		|| pair->diff_sb.dev_ratio > max_dev
		|| pair->diff_ab.dev_ratio > max_dev
		|| pair->spins_avg.dev_ratio > max_dev;

	if (pair->sampling > 0 || over_max_dev) {
		/* Sampling phase: fold this iteration's timings into the averages */
		tst_upd_diff_stat(&pair->diff_ss, alpha,
				  pair->a_start, pair->b_start);
		tst_upd_diff_stat(&pair->diff_sa, alpha,
				  pair->a_end, pair->a_start);
		tst_upd_diff_stat(&pair->diff_sb, alpha,
				  pair->b_end, pair->b_start);
		tst_upd_diff_stat(&pair->diff_ab, alpha,
				  pair->a_end, pair->b_end);
		tst_upd_stat(&pair->spins_avg, alpha, pair->spins);
		if (pair->sampling > 0 && --pair->sampling == 0) {
			tst_res(TINFO, "Minimum sampling period ended");
			tst_fzsync_pair_info(pair);
		}
	} else if (fabsf(pair->diff_ab.avg) >= 1) {
		/*
		 * Sampling done: estimate the time one spin takes, then pick
		 * a uniformly random delay from [-diff_sb, diff_sa] converted
		 * to spins (negative delays A, positive delays B).
		 */
		per_spin_time = fabsf(pair->diff_ab.avg) / MAX(pair->spins_avg.avg, 1.0f);
		time_delay = drand48() * (pair->diff_sa.avg + pair->diff_sb.avg)
			- pair->diff_sb.avg;
		pair->delay += (int)(time_delay / per_spin_time);

		if (!pair->sampling) {
			/* First pass after sampling: report the range once */
			tst_res(TINFO,
				"Reached deviation ratios < %.2f, introducing randomness",
				pair->max_dev_ratio);
			tst_res(TINFO, "Delay range is [-%d, %d]",
				(int)(pair->diff_sb.avg / per_spin_time) + pair->delay_bias,
				(int)(pair->diff_sa.avg / per_spin_time) - pair->delay_bias);
			tst_fzsync_pair_info(pair);
			pair->sampling = -1;
		}
	} else if (!pair->sampling) {
		/* diff_ab averaged to ~0 so per-spin time can't be derived */
		tst_res(TWARN, "Can't calculate random delay");
		tst_fzsync_pair_info(pair);
		pair->sampling = -1;
	}

	/* Reset the per-iteration spin counter for the next race */
	pair->spins = 0;
}
534
/**
 * Wait for the other thread
 *
 * @relates tst_fzsync_pair
 * @param our_cntr The counter for the thread we are on
 * @param other_cntr The counter for the thread we are synchronising with
 * @param spins A pointer to the spin counter or NULL
 *
 * Used by tst_fzsync_pair_wait_a(), tst_fzsync_pair_wait_b(),
 * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
 * thread, then it will spin wait. Unlike pthread_barrier_wait it will never
 * use futex and can count the number of spins spent waiting.
 *
 * Blocks until both threads have passed the matching sync point. The
 * counters wrap at INT_MAX; the comments below describe how the ordering
 * invariant survives the wrap.
 */
static inline void tst_fzsync_pair_wait(int *our_cntr,
					int *other_cntr,
					int *spins)
{
	if (tst_atomic_inc(other_cntr) == INT_MAX) {
		/*
		 * We are about to break the invariant that the thread with
		 * the lowest count is in front of the other. So we must wait
		 * here to ensure the other thread has at least reached the
		 * line above before doing that. If we are in rear position
		 * then our counter may already have been set to zero.
		 */
		while (tst_atomic_load(our_cntr) > 0
		       && tst_atomic_load(our_cntr) < INT_MAX) {
			if (spins)
				(*spins)++;
		}

		tst_atomic_store(0, other_cntr);
		/*
		 * Once both counters have been set to zero the invariant
		 * is restored and we can continue.
		 */
		while (tst_atomic_load(our_cntr) > 1)
			;
	} else {
		/*
		 * If our counter is less than the other thread's we are ahead
		 * of it and need to wait.
		 */
		while (tst_atomic_load(our_cntr) < tst_atomic_load(other_cntr)) {
			if (spins)
				(*spins)++;
		}
	}
}
587
/**
 * Wait in thread A
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_pair_wait
 *
 * Spin waits until thread B reaches the matching sync point; spins are not
 * counted.
 */
static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
{
	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL);
}
598
/**
 * Wait in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_pair_wait
 *
 * Spin waits until thread A reaches the matching sync point; spins are not
 * counted.
 */
static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL);
}
609
610 /**
611 * Decide whether to continue running thread A
612 *
613 * @relates tst_fzsync_pair
614 *
615 * Checks some values and decides whether it is time to break the loop of
616 * thread A.
617 *
618 * @return True to continue and false to break.
619 * @sa tst_fzsync_run_a
620 */
tst_fzsync_run_a(struct tst_fzsync_pair * pair)621 static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
622 {
623 int exit = 0;
624 float rem_p = 1 - tst_timeout_remaining() / pair->exec_time_start;
625
626 if ((pair->exec_time_p * SAMPLING_SLICE < rem_p)
627 && (pair->sampling > 0)) {
628 tst_res(TINFO, "Stopped sampling at %d (out of %d) samples, "
629 "sampling time reached 50%% of the total time limit",
630 pair->exec_loop, pair->min_samples);
631 pair->sampling = 0;
632 tst_fzsync_pair_info(pair);
633 }
634
635 if (pair->exec_time_p < rem_p) {
636 tst_res(TINFO,
637 "Exceeded execution time, requesting exit");
638 exit = 1;
639 }
640
641 if (++pair->exec_loop > pair->exec_loops) {
642 tst_res(TINFO,
643 "Exceeded execution loops, requesting exit");
644 exit = 1;
645 }
646
647 tst_atomic_store(exit, &pair->exit);
648 tst_fzsync_wait_a(pair);
649
650 if (exit) {
651 tst_fzsync_pair_cleanup(pair);
652 return 0;
653 }
654
655 return 1;
656 }
657
/**
 * Decide whether to continue running thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_run_a
 *
 * Waits at the iteration sync point, then returns zero if thread A has
 * requested an exit via pair->exit (see tst_fzsync_run_a()).
 */
static inline int tst_fzsync_run_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_wait_b(pair);
	return !tst_atomic_load(&pair->exit);
}
669
/**
 * Marks the start of a race region in thread A
 *
 * @relates tst_fzsync_pair
 *
 * This should be placed just before performing whatever action can cause a
 * race condition. Usually it is placed just before a syscall and
 * tst_fzsync_end_race_a() is placed just afterwards.
 *
 * A corresponding call to tst_fzsync_start_race_b() should be made in thread
 * B.
 *
 * Blocks until thread B reaches tst_fzsync_start_race_b(). If the calculated
 * delay is negative (thread A is the one to be delayed), it is burnt here in
 * a busy loop before the start time is recorded.
 *
 * @sa tst_fzsync_pair_update
 */
static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
{
	/* volatile so the delay loop below is not optimised away */
	volatile int delay;

	/* Update statistics and calculate this iteration's delay */
	tst_fzsync_pair_update(pair);

	tst_fzsync_wait_a(pair);

	delay = pair->delay;
	while (delay < 0)
		delay++;

	tst_fzsync_time(&pair->a_start);
}
702
/**
 * Marks the end of a race region in thread A
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 *
 * Records the end time, then waits for thread B while counting the spins
 * spent waiting (used to estimate the per-spin time).
 */
static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
{
	tst_fzsync_time(&pair->a_end);
	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins);
}
714
/**
 * Marks the start of a race region in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 *
 * Blocks until thread A reaches tst_fzsync_start_race_a(). If the calculated
 * delay is positive (thread B is the one to be delayed), it is burnt here in
 * a busy loop before the start time is recorded.
 */
static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
{
	/* volatile so the delay loop below is not optimised away */
	volatile int delay;

	tst_fzsync_wait_b(pair);

	delay = pair->delay;
	while (delay > 0)
		delay--;

	tst_fzsync_time(&pair->b_start);
}
733
/**
 * Marks the end of a race region in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 *
 * Records the end time, then waits for thread A while counting the spins
 * spent waiting (used to estimate the per-spin time).
 */
static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_time(&pair->b_end);
	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins);
}
745
746 /**
747 * Add some amount to the delay bias
748 *
749 * @relates tst_fzsync_pair
750 * @param change The amount to add, can be negative
751 *
752 * A positive change delays thread B and a negative one delays thread
753 * A.
754 *
755 * It is intended to be used in tests where the time taken by syscall A and/or
756 * B are significantly affected by their chronological order. To the extent
757 * that the delay range will not include the correct values if too many of the
758 * initial samples are taken when the syscalls (or operations within the
759 * syscalls) happen in the wrong order.
760 *
761 * An example of this is cve/cve-2016-7117.c where a call to close() is racing
762 * with a call to recvmmsg(). If close() happens before recvmmsg() has chance
763 * to check if the file descriptor is open then recvmmsg() completes very
764 * quickly. If the call to close() happens once recvmmsg() has already checked
765 * the descriptor it takes much longer. The sample where recvmmsg() completes
766 * quickly is essentially invalid for our purposes. The test uses the simple
767 * heuristic of whether recvmmsg() returns EBADF, to decide if it should call
768 * tst_fzsync_pair_add_bias() to further delay syscall B.
769 */
tst_fzsync_pair_add_bias(struct tst_fzsync_pair * pair,int change)770 static inline void tst_fzsync_pair_add_bias(struct tst_fzsync_pair *pair, int change)
771 {
772 if (pair->sampling > 0)
773 pair->delay_bias += change;
774 }
775
776 #endif /* TST_FUZZY_SYNC_H__ */
777