/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2017-2018 Richard Palethorpe <rpalethorpe@suse.com>
 */
/**
 * @file tst_fuzzy_sync.h
 * Fuzzy Synchronisation - abbreviated to fzsync
 *
 * This library is intended to help reproduce race conditions by synchronising
 * two threads at a given place by marking the range in which a race may
 * occur. Because the exact place where any race occurs is within the kernel,
 * and therefore impossible to mark accurately, the library may add randomised
 * delays to either thread in order to help find the exact race timing.
 *
 * Currently only two way races are explicitly supported, that is races
 * involving two threads or processes. We refer to the main test thread as
 * thread A and the child thread as thread B.
 *
 * In each thread you need a simple while- or for-loop which the tst_fzsync_*
 * functions are called in. In the simplest case thread A will look something
 * like:
 *
 * tst_fzsync_pair_reset(&pair, run_thread_b);
 * while (tst_fzsync_run_a(&pair)) {
 *	// Perform some setup which must happen before the race
 *	tst_fzsync_start_race_a(&pair);
 *	// Do some dodgy syscall
 *	tst_fzsync_end_race_a(&pair);
 * }
 *
 * Then in thread B (run_thread_b):
 *
 * while (tst_fzsync_run_b(&pair)) {
 *	tst_fzsync_start_race_b(&pair);
 *	// Do something which can race with the dodgy syscall in A
 *	tst_fzsync_end_race_b(&pair);
 * }
 *
 * The calls to tst_fzsync_start/end_race and tst_fzsync_run_a/b block (at
 * least) until both threads have entered them. These functions can only be
 * called once for each iteration, but further synchronisation points can be
 * added by calling tst_fzsync_wait_a() and tst_fzsync_wait_b() in each
 * thread.
 *
 * The execution of the loops in threads A and B is bounded by both iteration
 * count and time. A slow machine is likely to be limited by time and a fast
 * one by iteration count. The user can use the -i parameter to run the test
 * multiple times or LTP_TIMEOUT_MUL to give the test more time.
 *
 * It is possible to use the library just for tst_fzsync_pair_wait() to get a
 * basic spin wait. However if you are actually testing a race condition then
 * it is recommended to use tst_fzsync_start_race_a/b even if the
 * randomisation is not needed. It provides some semantic information which
 * may be useful in the future.
 *
 * For a usage example see testcases/cve/cve-2016-7117.c or just run
 * 'git grep tst_fuzzy_sync.h'
 *
 * @sa tst_fzsync_pair
 */

#include <limits.h>
#include <math.h>
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>
#include "tst_atomic.h"
#include "tst_cpu.h"
#include "tst_timer.h"
#include "tst_safe_pthread.h"

#ifndef TST_FUZZY_SYNC_H__
#define TST_FUZZY_SYNC_H__

/* How much of the execution time sampling is allowed to take */
#define SAMPLING_SLICE 0.5f

/** Some statistics for a variable */
struct tst_fzsync_stat {
	float avg;
	float avg_dev;
	float dev_ratio;
};

/**
 * The state of a two way synchronisation or race.
 *
 * This contains all the necessary state for approximately synchronising two
 * sections of code in different threads.
 *
 * Some of the fields can be configured before calling
 * tst_fzsync_pair_reset(), however this is mainly for debugging purposes. If
 * a test requires one of the parameters to be modified, we should consider
 * finding a way of automatically selecting an appropriate value at runtime.
 *
 * Internal fields should only be accessed by library functions.
 */
struct tst_fzsync_pair {
	/**
	 * The rate at which old diff samples are forgotten
	 *
	 * Defaults to 0.25.
	 */
	float avg_alpha;
	/** Internal; Thread A start time */
	struct timespec a_start;
	/** Internal; Thread B start time */
	struct timespec b_start;
	/** Internal; Thread A end time */
	struct timespec a_end;
	/** Internal; Thread B end time */
	struct timespec b_end;
	/** Internal; Avg. difference between a_start and b_start */
	struct tst_fzsync_stat diff_ss;
	/** Internal; Avg. difference between a_start and a_end */
	struct tst_fzsync_stat diff_sa;
	/** Internal; Avg. difference between b_start and b_end */
	struct tst_fzsync_stat diff_sb;
	/** Internal; Avg. difference between a_end and b_end */
	struct tst_fzsync_stat diff_ab;
	/** Internal; Number of spins while waiting for the slower thread */
	int spins;
	struct tst_fzsync_stat spins_avg;
	/**
	 * Internal; Number of spins to use in the delay.
	 *
	 * A negative value delays thread A and a positive one delays thread B.
	 */
	int delay;
	int delay_bias;
	/**
	 * Internal; The number of samples left or the sampling state.
	 *
	 * A positive value is the number of remaining mandatory
	 * samples. Zero or a negative value indicates some other state.
	 */
	int sampling;
	/**
	 * The minimum number of statistical samples which must be collected.
	 *
	 * The minimum number of iterations which must be performed before a
	 * random delay can be calculated. Defaults to 1024.
	 */
	int min_samples;
	/**
	 * The maximum allowed proportional average deviation.
	 *
	 * A value in the range (0, 1) which gives the maximum average
	 * deviation the samples may have before random delays can be
	 * calculated.
	 *
	 * It is a ratio of (average_deviation / total_time). The default is
	 * 0.1, so this allows an average deviation of at most 10%.
	 */
	float max_dev_ratio;

	/** Internal; Atomic counter used by fzsync_pair_wait() */
	int a_cntr;
	/** Internal; Atomic counter used by fzsync_pair_wait() */
	int b_cntr;
	/** Internal; Exit flag used by tst_fzsync_run_a/b() and tst_fzsync_pair_cleanup() */
	int exit;
	/**
	 * The maximum desired execution time as a proportion of the timeout
	 *
	 * A value x so that 0 < x < 1 which decides how long the test should
	 * be run for (assuming the loop limit is not exceeded first).
	 *
	 * Defaults to 0.5 (~150 seconds with the default timeout).
	 */
	float exec_time_p;
	/** Internal; The test time remaining on tst_fzsync_pair_reset() */
	float exec_time_start;
	/**
	 * The maximum number of iterations to execute during the test
	 *
	 * Defaults to a large number, but not too large (3000000).
	 */
	int exec_loops;
	/** Internal; The current loop index */
	int exec_loop;
	/** Internal; The second thread or 0 */
	pthread_t thread_b;
	/**
	 * Whether to yield the CPU while spin waiting
	 *
	 * On single core machines the fuzzy sync library would otherwise
	 * take a considerable amount of time to run, so we call
	 * sched_yield() to give up the CPU and decrease the test time.
	 */
	bool yield_in_wait;

};

#define CHK(param, low, hi, def) do {					      \
	pair->param = (pair->param ? pair->param : def);		      \
	if (pair->param < low)						      \
		tst_brk(TBROK, #param " is less than the lower bound " #low); \
	if (pair->param > hi)						      \
		tst_brk(TBROK, #param " is more than the upper bound " #hi);  \
	} while (0)
/**
 * Ensures that any Fuzzy Sync parameters are properly set
 *
 * @relates tst_fzsync_pair
 *
 * Usually called from the setup function, it sets default parameter values or
 * validates any existing non-defaults.
 *
 * @sa tst_fzsync_pair_reset()
 */
static inline void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
{
	CHK(avg_alpha, 0, 1, 0.25);
	CHK(min_samples, 20, INT_MAX, 1024);
	CHK(max_dev_ratio, 0, 1, 0.1);
	CHK(exec_time_p, 0, 1, 0.5);
	CHK(exec_loops, 20, INT_MAX, 3000000);

	if (tst_ncpus_available() <= 1)
		pair->yield_in_wait = 1;
}
#undef CHK
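
/*
 * For example, a hypothetical test which needs more samples than the default
 * could override min_samples before validation; a minimal sketch, not taken
 * from an existing test:
 *
 *	static struct tst_fzsync_pair pair;
 *
 *	static void setup(void)
 *	{
 *		pair.min_samples = 10000;
 *		tst_fzsync_pair_init(&pair);
 *	}
 *
 * Any field left at zero is replaced by its default, and an out-of-range
 * value ends the test with tst_brk(TBROK, ...).
 */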

/**
 * Exit and join thread B if necessary.
 *
 * @relates tst_fzsync_pair
 *
 * Call this from your cleanup function.
 */
static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
{
	if (pair->thread_b) {
		/* Cancel thread B if the parent hit an accidental break */
		if (!pair->exit) {
			tst_atomic_store(1, &pair->exit);
			usleep(100000);
			pthread_cancel(pair->thread_b);
		}
		SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
		pair->thread_b = 0;
	}
}

/** To store the run_b pointer and pass it to tst_fzsync_thread_wrapper */
struct tst_fzsync_run_thread {
	void *(*func)(void *);
	void *arg;
};

/**
 * Wraps run_b for tst_fzsync_pair_reset() so that pthread cancellation
 * is enabled at the start of thread B.
 */
static inline void *tst_fzsync_thread_wrapper(void *run_thread)
{
	struct tst_fzsync_run_thread t = *(struct tst_fzsync_run_thread *)run_thread;

	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
	return t.func(t.arg);
}

/**
 * Zero some stat fields
 *
 * @relates tst_fzsync_stat
 */
static inline void tst_init_stat(struct tst_fzsync_stat *s)
{
	s->avg = 0;
	s->avg_dev = 0;
}

/**
 * Reset or initialise fzsync.
 *
 * @relates tst_fzsync_pair
 * @param pair The state structure initialised with tst_fzsync_pair_init().
 * @param run_b The function defining thread B or NULL.
 *
 * Call this from your main test function (thread A), just before entering the
 * main loop. It will (re)set any variables needed by fzsync and (re)start
 * thread B using the function provided.
 *
 * If you need to use fork or clone to start the second thread/process then
 * you can pass NULL to run_b and handle starting and stopping thread B
 * yourself. You may need to place tst_fzsync_pair in some shared memory as
 * well; see the sketch after this function.
 *
 * @sa tst_fzsync_pair_init()
 */
static inline void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
					 void *(*run_b)(void *))
{
	tst_fzsync_pair_cleanup(pair);

	tst_init_stat(&pair->diff_ss);
	tst_init_stat(&pair->diff_sa);
	tst_init_stat(&pair->diff_sb);
	tst_init_stat(&pair->diff_ab);
	tst_init_stat(&pair->spins_avg);
	pair->delay = 0;
	pair->delay_bias = 0;
	pair->sampling = pair->min_samples;

	pair->exec_loop = 0;

	pair->a_cntr = 0;
	pair->b_cntr = 0;
	pair->exit = 0;
	if (run_b) {
		static struct tst_fzsync_run_thread wrap_run_b;

		wrap_run_b.func = run_b;
		wrap_run_b.arg = NULL;
		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, tst_fzsync_thread_wrapper, &wrap_run_b);
	}

	pair->exec_time_start = (float)tst_timeout_remaining();
}
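
/*
 * A minimal sketch of the run_b == NULL case mentioned above, assuming the
 * test forks a child instead of spawning a thread and sets .forks_children
 * in its tst_test struct. The names parent_loop() and child_loop() are
 * hypothetical stand-ins for the test's race loops:
 *
 *	static struct tst_fzsync_pair *pair;
 *
 *	static void run(void)
 *	{
 *		pair = SAFE_MMAP(NULL, sizeof(*pair), PROT_READ | PROT_WRITE,
 *				 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
 *		tst_fzsync_pair_init(pair);
 *		tst_fzsync_pair_reset(pair, NULL);
 *
 *		if (!SAFE_FORK())
 *			child_loop();	// calls tst_fzsync_run_b() etc.
 *		else
 *			parent_loop();	// calls tst_fzsync_run_a() etc.
 *	}
 */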

/**
 * Print stat
 *
 * @relates tst_fzsync_stat
 */
static inline void tst_fzsync_stat_info(struct tst_fzsync_stat stat,
					char *unit, char *name)
{
	tst_res(TINFO,
		"%1$-17s: { avg = %3$5.0f%2$s, avg_dev = %4$5.0f%2$s, dev_ratio = %5$.2f }",
		name, unit, stat.avg, stat.avg_dev, stat.dev_ratio);
}

/**
 * Print some synchronisation statistics
 *
 * @relates tst_fzsync_pair
 */
static inline void tst_fzsync_pair_info(struct tst_fzsync_pair *pair)
{
	tst_res(TINFO, "loop = %d, delay_bias = %d",
		pair->exec_loop, pair->delay_bias);
	tst_fzsync_stat_info(pair->diff_ss, "ns", "start_a - start_b");
	tst_fzsync_stat_info(pair->diff_sa, "ns", "end_a - start_a");
	tst_fzsync_stat_info(pair->diff_sb, "ns", "end_b - start_b");
	tst_fzsync_stat_info(pair->diff_ab, "ns", "end_a - end_b");
	tst_fzsync_stat_info(pair->spins_avg, "  ", "spins");
}

/** Wraps clock_gettime */
static inline void tst_fzsync_time(struct timespec *t)
{
#ifdef CLOCK_MONOTONIC_RAW
	clock_gettime(CLOCK_MONOTONIC_RAW, t);
#else
	clock_gettime(CLOCK_MONOTONIC, t);
#endif
}

/**
 * Exponential moving average
 *
 * @param alpha The preference for recent samples over old ones.
 * @param sample The current sample
 * @param prev_avg The average of all the previous samples
 *
 * @return The average including the current sample.
 */
static inline float tst_exp_moving_avg(float alpha,
				       float sample,
				       float prev_avg)
{
	return alpha * sample + (1.0 - alpha) * prev_avg;
}
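
/*
 * That is, avg_n = alpha * sample_n + (1 - alpha) * avg_(n-1). A quick worked
 * example with the default alpha of 0.25: if the running average is 100ns and
 * the new sample is 200ns, the updated average is
 * 0.25 * 200 + 0.75 * 100 = 125ns, so a single outlier only moves the average
 * a quarter of the way towards itself.
 */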

/**
 * Update a stat with a new sample
 *
 * @relates tst_fzsync_stat
 */
static inline void tst_upd_stat(struct tst_fzsync_stat *s,
				float alpha,
				float sample)
{
	s->avg = tst_exp_moving_avg(alpha, sample, s->avg);
	s->avg_dev = tst_exp_moving_avg(alpha,
					fabs(s->avg - sample), s->avg_dev);
	s->dev_ratio = fabs(s->avg ? s->avg_dev / s->avg : 0);
}

/**
 * Update a stat with a new diff sample
 *
 * @relates tst_fzsync_stat
 */
static inline void tst_upd_diff_stat(struct tst_fzsync_stat *s,
				     float alpha,
				     struct timespec t1,
				     struct timespec t2)
{
	tst_upd_stat(s, alpha, tst_timespec_diff_ns(t1, t2));
}

/**
 * Calculate various statistics and the delay
 *
 * This function helps create the fuzz in fuzzy sync. Imagine we have the
 * following timelines in threads A and B:
 *
 *  start_race_a
 *      ^                    end_race_a (a)
 *      |                        ^
 *      |                        |
 *  - --+------------------------+-- - -
 *      |       Syscall A        |          Thread A
 *  - --+------------------------+-- - -
 *  - --+----------------+-------+-- - -
 *      |   Syscall B    | spin  |          Thread B
 *  - --+----------------+-------+-- - -
 *      |                |
 *      ^                ^
 *  start_race_b     end_race_b
 *
 * Here we have synchronised the calls to syscall A and B with start_race_{a,
 * b} so that they happen at approximately the same time in threads A and
 * B. If the race condition occurs during the entry code for these two
 * functions then we will quickly hit it. If it occurs during the exit code of
 * B and mid way through A, then we will quickly hit it.
 *
 * However if the exit paths of A and B need to be aligned and (end_race_a -
 * end_race_b) is large relative to the variation in call times, the
 * probability of hitting the race condition is close to zero. To solve this
 * scenario (and others) a randomised delay is introduced before the syscalls
 * in A and B. Given enough time the following should happen where the exit
 * paths are now synchronised:
 *
 *  start_race_a
 *      ^                    end_race_a (a)
 *      |                        ^
 *      |                        |
 *  - --+------------------------+-- - -
 *      |       Syscall A        |          Thread A
 *  - --+------------------------+-- - -
 *  - --+-------+----------------+-- - -
 *      | delay |   Syscall B    |          Thread B
 *  - --+-------+----------------+-- - -
 *      |                        |
 *      ^                        ^
 *  start_race_b             end_race_b
 *
 * The delay is not introduced immediately and the delay range is only
 * calculated once the average relative deviation has dropped below some
 * percentage of the total time.
 *
 * The delay range is chosen so that any point in Syscall A could be
 * synchronised with any point in Syscall B using a value from the
 * range. Because the delay range may be too large for a linear search, we use
 * a uniformly distributed random function to pick a value from it.
 *
 * The delay range goes from positive to negative. A negative delay will delay
 * thread A and a positive one will delay thread B. The range is bounded by
 * the point where the entry code to Syscall A is synchronised with the exit
 * to Syscall B and the entry code to Syscall B is synchronised with the exit
 * of A.
 *
 * In order to calculate the lower bound (the max delay of A) we can simply
 * negate the execution time of Syscall B and convert it to a spin count. For
 * the upper bound (the max delay of B), we just take the execution time of A
 * and convert it to a spin count.
 *
 * In order to calculate the spin count we need to know approximately how long
 * a spin takes and divide the delay time by it. We find this by first
 * counting how many spins one thread spends waiting for the other during
 * end_race[1]. We also know when each syscall exits so we can take the
 * difference between the exit times and divide it by the number of spins
 * spent waiting.
 *
 * All the times and counts we use in the calculation are averaged over a
 * variable number of iterations. There is an initial sampling period where we
 * simply collect time and count samples then calculate their averages. When a
 * minimum number of samples have been collected, and if the average deviation
 * is below some proportion of the average sample magnitude, then the sampling
 * period is ended. On all further iterations a random delay is calculated and
 * applied, but the averages are not updated.
 *
 * [1] This assumes there is always a significant difference. The algorithm
 * may fail to introduce a delay (when one is needed) in situations where
 * Syscall A and B finish at approximately the same time.
 *
 * @relates tst_fzsync_pair
 */
static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
{
	float alpha = pair->avg_alpha;
	float per_spin_time, time_delay;
	float max_dev = pair->max_dev_ratio;
	int over_max_dev;

	pair->delay = pair->delay_bias;

	over_max_dev = pair->diff_ss.dev_ratio > max_dev
		|| pair->diff_sa.dev_ratio > max_dev
		|| pair->diff_sb.dev_ratio > max_dev
		|| pair->diff_ab.dev_ratio > max_dev
		|| pair->spins_avg.dev_ratio > max_dev;

	if (pair->sampling > 0 || over_max_dev) {
		tst_upd_diff_stat(&pair->diff_ss, alpha,
				  pair->a_start, pair->b_start);
		tst_upd_diff_stat(&pair->diff_sa, alpha,
				  pair->a_end, pair->a_start);
		tst_upd_diff_stat(&pair->diff_sb, alpha,
				  pair->b_end, pair->b_start);
		tst_upd_diff_stat(&pair->diff_ab, alpha,
				  pair->a_end, pair->b_end);
		tst_upd_stat(&pair->spins_avg, alpha, pair->spins);
		if (pair->sampling > 0 && --pair->sampling == 0) {
			tst_res(TINFO, "Minimum sampling period ended");
			tst_fzsync_pair_info(pair);
		}
	} else if (fabsf(pair->diff_ab.avg) >= 1) {
		per_spin_time = fabsf(pair->diff_ab.avg) / MAX(pair->spins_avg.avg, 1.0f);
		time_delay = drand48() * (pair->diff_sa.avg + pair->diff_sb.avg)
			- pair->diff_sb.avg;
		pair->delay += (int)(1.1 * time_delay / per_spin_time);

		if (!pair->sampling) {
			tst_res(TINFO,
				"Reached deviation ratios < %.2f, introducing randomness",
				pair->max_dev_ratio);
			tst_res(TINFO, "Delay range is [%d, %d]",
				-(int)(pair->diff_sb.avg / per_spin_time) + pair->delay_bias,
				(int)(pair->diff_sa.avg / per_spin_time) + pair->delay_bias);
			tst_fzsync_pair_info(pair);
			pair->sampling = -1;
		}
	} else if (!pair->sampling) {
		tst_res(TWARN, "Can't calculate random delay");
		tst_fzsync_pair_info(pair);
		pair->sampling = -1;
	}

	pair->spins = 0;
}
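
/*
 * A worked example of the delay calculation above, with made-up round
 * numbers: suppose Syscall A takes diff_sa.avg = 2000ns, Syscall B takes
 * diff_sb.avg = 1000ns, and the exit times differ by diff_ab.avg = 1000ns
 * over spins_avg.avg = 100 spins, giving per_spin_time = 1000 / 100 = 10ns.
 * Then time_delay is drawn uniformly from [-1000, 2000)ns and the delay is
 * roughly time_delay / 10 spins (scaled by 1.1), i.e. somewhere in about
 * [-110, 220] spins. A negative result delays thread A and a positive one
 * delays thread B, so every alignment of the two syscalls gets sampled.
 */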

/**
 * Wait for the other thread
 *
 * @relates tst_fzsync_pair
 * @param our_cntr The counter for the thread we are on
 * @param other_cntr The counter for the thread we are synchronising with
 * @param spins A pointer to the spin counter or NULL
 * @param yield_in_wait Whether to call sched_yield() while spinning
 *
 * Used by tst_fzsync_wait_a(), tst_fzsync_wait_b(),
 * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
 * thread, then it will spin wait. Unlike pthread_barrier_wait it will never
 * use a futex and can count the number of spins spent waiting.
 */
static inline void tst_fzsync_pair_wait(int *our_cntr,
					int *other_cntr,
					int *spins,
					bool yield_in_wait)
{
	if (tst_atomic_inc(other_cntr) == INT_MAX) {
		/*
		 * We are about to break the invariant that the thread with
		 * the lowest count is in front of the other. So we must wait
		 * here to ensure the other thread has at least reached the
		 * line above before doing that. If we are in rear position
		 * then our counter may already have been set to zero.
		 */
		if (yield_in_wait) {
			while (tst_atomic_load(our_cntr) > 0
			       && tst_atomic_load(our_cntr) < INT_MAX) {
				if (spins)
					(*spins)++;

				sched_yield();
			}
		} else {
			while (tst_atomic_load(our_cntr) > 0
			       && tst_atomic_load(our_cntr) < INT_MAX) {
				if (spins)
					(*spins)++;
			}
		}

		tst_atomic_store(0, other_cntr);
		/*
		 * Once both counters have been set to zero the invariant
		 * is restored and we can continue.
		 */
		if (yield_in_wait) {
			while (tst_atomic_load(our_cntr) > 1)
				sched_yield();
		} else {
			while (tst_atomic_load(our_cntr) > 1)
				;
		}
	} else {
		/*
		 * If our counter is less than the other thread's we are ahead
		 * of it and need to wait.
		 */
		if (yield_in_wait) {
			while (tst_atomic_load(our_cntr) <
			       tst_atomic_load(other_cntr)) {
				if (spins)
					(*spins)++;
				sched_yield();
			}
		} else {
			while (tst_atomic_load(our_cntr) <
			       tst_atomic_load(other_cntr)) {
				if (spins)
					(*spins)++;
			}
		}
	}
}

/**
 * Wait in thread A
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_pair_wait
 */
static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
{
	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL, pair->yield_in_wait);
}

/**
 * Wait in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_pair_wait
 */
static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL, pair->yield_in_wait);
}
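
/*
 * As mentioned in the file header, these can add extra synchronisation
 * points inside the loop. A minimal sketch with a hypothetical reopen_fd()
 * step in A which must not start until B has left its race section:
 *
 *	// In thread A's loop, after the race region:
 *	tst_fzsync_end_race_a(&pair);
 *	tst_fzsync_wait_a(&pair);
 *	reopen_fd();	// B is now past tst_fzsync_wait_b()
 *
 *	// In thread B's loop, after the race region:
 *	tst_fzsync_end_race_b(&pair);
 *	tst_fzsync_wait_b(&pair);
 *
 * Each extra wait must appear exactly once per iteration in both threads,
 * otherwise the counters fall out of step.
 */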

/**
 * Decide whether to continue running thread A
 *
 * @relates tst_fzsync_pair
 *
 * Checks some values and decides whether it is time to break the loop of
 * thread A.
 *
 * @return True to continue and false to break.
 * @sa tst_fzsync_run_b
 */
static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
{
	int exit = 0;
	float rem_p = 1 - tst_timeout_remaining() / pair->exec_time_start;

	if ((pair->exec_time_p * SAMPLING_SLICE < rem_p)
		&& (pair->sampling > 0)) {
		tst_res(TINFO, "Stopped sampling at %d (out of %d) samples, "
			"sampling time reached 50%% of the total time limit",
			pair->exec_loop, pair->min_samples);
		pair->sampling = 0;
		tst_fzsync_pair_info(pair);
	}

	if (pair->exec_time_p < rem_p) {
		tst_res(TINFO,
			"Exceeded execution time, requesting exit");
		exit = 1;
	}

	if (++pair->exec_loop > pair->exec_loops) {
		tst_res(TINFO,
			"Exceeded execution loops, requesting exit");
		exit = 1;
	}

	tst_atomic_store(exit, &pair->exit);
	tst_fzsync_wait_a(pair);

	if (exit) {
		tst_fzsync_pair_cleanup(pair);
		return 0;
	}

	return 1;
}

/**
 * Decide whether to continue running thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_run_a
 */
static inline int tst_fzsync_run_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_wait_b(pair);
	return !tst_atomic_load(&pair->exit);
}

/**
 * Marks the start of a race region in thread A
 *
 * @relates tst_fzsync_pair
 *
 * This should be placed just before performing whatever action can cause a
 * race condition. Usually it is placed just before a syscall and
 * tst_fzsync_end_race_a() is placed just afterwards.
 *
 * A corresponding call to tst_fzsync_start_race_b() should be made in thread
 * B.
 *
 * @sa tst_fzsync_pair_update
 */
static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
{
	volatile int delay;

	tst_fzsync_pair_update(pair);

	tst_fzsync_wait_a(pair);

	delay = pair->delay;
	if (pair->yield_in_wait) {
		while (delay < 0) {
			sched_yield();
			delay++;
		}
	} else {
		while (delay < 0)
			delay++;
	}

	tst_fzsync_time(&pair->a_start);
}

/**
 * Marks the end of a race region in thread A
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 */
static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
{
	tst_fzsync_time(&pair->a_end);
	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins, pair->yield_in_wait);
}

/**
 * Marks the start of a race region in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 */
static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
{
	volatile int delay;

	tst_fzsync_wait_b(pair);

	delay = pair->delay;
	if (pair->yield_in_wait) {
		while (delay > 0) {
			sched_yield();
			delay--;
		}
	} else {
		while (delay > 0)
			delay--;
	}

	tst_fzsync_time(&pair->b_start);
}

/**
 * Marks the end of a race region in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 */
static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_time(&pair->b_end);
	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins, pair->yield_in_wait);
}

/**
 * Add some amount to the delay bias
 *
 * @relates tst_fzsync_pair
 * @param change The amount to add, can be negative
 *
 * A positive change delays thread B and a negative one delays thread
 * A.
 *
 * It is intended to be used in tests where the time taken by syscall A and/or
 * B is significantly affected by their chronological order, to the extent
 * that the delay range will not include the correct values if too many of the
 * initial samples are taken when the syscalls (or operations within the
 * syscalls) happen in the wrong order.
 *
 * An example of this is cve/cve-2016-7117.c where a call to close() is racing
 * with a call to recvmmsg(). If close() happens before recvmmsg() has a chance
 * to check if the file descriptor is open then recvmmsg() completes very
 * quickly. If the call to close() happens once recvmmsg() has already checked
 * the descriptor it takes much longer. The sample where recvmmsg() completes
 * quickly is essentially invalid for our purposes. The test uses the simple
 * heuristic of whether recvmmsg() returns EBADF, to decide if it should call
 * tst_fzsync_pair_add_bias() to further delay syscall B.
 */
static inline void tst_fzsync_pair_add_bias(struct tst_fzsync_pair *pair, int change)
{
	if (pair->sampling > 0)
		pair->delay_bias += change;
}
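
/*
 * A minimal sketch of the heuristic described above, loosely modelled on
 * cve-2016-7117.c rather than copied from it; ret is assumed to hold the
 * return value of the racy recvmmsg() call in thread A:
 *
 *	tst_fzsync_start_race_a(&pair);
 *	ret = recvmmsg(fd, msgvec, VLEN, 0, NULL);
 *	tst_fzsync_end_race_a(&pair);
 *
 *	// We lost the race in the wrong direction: nudge B later so that
 *	// future samples are taken with the interesting ordering.
 *	if (ret == -1 && errno == EBADF)
 *		tst_fzsync_pair_add_bias(&pair, 1);
 *
 * The bias only accumulates during the sampling phase; afterwards the call
 * is a no-op.
 */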

#endif /* TST_FUZZY_SYNC_H__ */