• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright (c) 2017-2018 Richard Palethorpe <rpalethorpe@suse.com>
4  */
5 /**
6  * @file tst_fuzzy_sync.h
7  * Fuzzy Synchronisation - abbreviated to fzsync
8  *
9  * This library is intended to help reproduce race conditions by synchronising
10  * two threads at a given place by marking the range a race may occur
11  * in. Because the exact place where any race occurs is within the kernel,
12  * and therefore impossible to mark accurately, the library may add randomised
13  * delays to either thread in order to help find the exact race timing.
14  *
15  * Currently only two way races are explicitly supported, that is races
16  * involving two threads or processes. We refer to the main test thread as
17  * thread A and the child thread as thread B.
18  *
19  * In each thread you need a simple while- or for-loop which the tst_fzsync_*
20  * functions are called in. In the simplest case thread A will look something
21  * like:
22  *
23  * tst_fzsync_pair_reset(&pair, run_thread_b);
24  * while (tst_fzsync_run_a(&pair)) {
25  *	// Perform some setup which must happen before the race
26  *	tst_fzsync_start_race_a(&pair);
27  *	// Do some dodgy syscall
28  *	tst_fzsync_end_race_a(&pair);
29  * }
30  *
31  * Then in thread B (run_thread_b):
32  *
33  * while (tst_fzsync_run_b(&pair)) {
34  *	tst_fzsync_start_race_b(&pair);
35  *	// Do something which can race with the dodgy syscall in A
36  *	tst_fzsync_end_race_b(&pair);
37  * }
38  *
39  * The calls to tst_fzsync_start/end_race and tst_fzsync_run_a/b block (at
40  * least) until both threads have entered them. These functions can only be
41  * called once for each iteration, but further synchronisation points can be
42  * added by calling tst_fzsync_wait_a() and tst_fzsync_wait_b() in each
43  * thread.
44  *
45  * The execution of the loops in threads A and B are bounded by both iteration
46  * count and time. A slow machine is likely to be limited by time and a fast
47  * one by iteration count. The user can use the -i parameter to run the test
48  * multiple times or LTP_TIMEOUT_MUL to give the test more time.
49  *
50  * It is possible to use the library just for tst_fzsync_pair_wait() to get a
51  * basic spin wait. However if you are actually testing a race condition then
52  * it is recommended to use tst_fzsync_start_race_a/b even if the
53  * randomisation is not needed. It provides some semantic information which
54  * may be useful in the future.
55  *
56  * For a usage example see testcases/cve/cve-2016-7117.c or just run
57  * 'git grep tst_fuzzy_sync.h'
58  *
59  * @sa tst_fzsync_pair
60  */
61 
62 #include <sys/time.h>
63 #include <time.h>
64 #include <math.h>
65 #include <stdlib.h>
66 #include <pthread.h>
67 #include "tst_atomic.h"
68 #include "tst_timer.h"
69 #include "tst_safe_pthread.h"
70 
71 #ifndef TST_FUZZY_SYNC_H__
72 #define TST_FUZZY_SYNC_H__
73 
74 /* how much of exec time is sampling allowed to take */
75 #define SAMPLING_SLICE 0.5f
76 
77 /** Some statistics for a variable */
/** Some statistics for a variable */
struct tst_fzsync_stat {
	/** Exponential moving average of the samples */
	float avg;
	/** Exponential moving average of the absolute deviation from avg */
	float avg_dev;
	/** Proportional deviation, |avg_dev / avg|; see tst_upd_stat() */
	float dev_ratio;
};
83 
84 /**
85  * The state of a two way synchronisation or race.
86  *
87  * This contains all the necessary state for approximately synchronising two
88  * sections of code in different threads.
89  *
90  * Some of the fields can be configured before calling
91  * tst_fzsync_pair_reset(), however this is mainly for debugging purposes. If
92  * a test requires one of the parameters to be modified, we should consider
93  * finding a way of automatically selecting an appropriate value at runtime.
94  *
95  * Internal fields should only be accessed by library functions.
96  */
struct tst_fzsync_pair {
	/**
	 * The rate at which old diff samples are forgotten
	 *
	 * Defaults to 0.25.
	 */
	float avg_alpha;
	/** Internal; Thread A start time */
	struct timespec a_start;
	/** Internal; Thread B start time */
	struct timespec b_start;
	/** Internal; Thread A end time */
	struct timespec a_end;
	/** Internal; Thread B end time */
	struct timespec b_end;
	/** Internal; Avg. difference between a_start and b_start */
	struct tst_fzsync_stat diff_ss;
	/** Internal; Avg. difference between a_start and a_end */
	struct tst_fzsync_stat diff_sa;
	/** Internal; Avg. difference between b_start and b_end */
	struct tst_fzsync_stat diff_sb;
	/** Internal; Avg. difference between a_end and b_end */
	struct tst_fzsync_stat diff_ab;
	/** Internal; Number of spins while waiting for the slower thread */
	int spins;
	/** Internal; Avg. of the spin counts recorded in spins */
	struct tst_fzsync_stat spins_avg;
	/**
	 * Internal; Number of spins to use in the delay.
	 *
	 * A negative value delays thread A and a positive delays thread B.
	 */
	int delay;
	/**
	 * Internal; Fixed delay offset set via tst_fzsync_pair_add_bias().
	 *
	 * Copied into delay at the start of every update.
	 */
	int delay_bias;
	/**
	 *  Internal; The number of samples left or the sampling state.
	 *
	 *  A positive value is the number of remaining mandatory
	 *  samples. Zero or a negative indicate some other state.
	 */
	int sampling;
	/**
	 * The Minimum number of statistical samples which must be collected.
	 *
	 * The minimum number of iterations which must be performed before a
	 * random delay can be calculated. Defaults to 1024.
	 */
	int min_samples;
	/**
	 * The maximum allowed proportional average deviation.
	 *
	 * A value in the range (0, 1) which gives the maximum average
	 * deviation which must be attained before random delays can be
	 * calculated.
	 *
	 * It is a ratio of (average_deviation / total_time). The default is
	 * 0.1, so this allows an average deviation of at most 10%.
	 */
	float max_dev_ratio;

	/** Internal; Atomic counter used by fzsync_pair_wait() */
	int a_cntr;
	/** Internal; Atomic counter used by fzsync_pair_wait() */
	int b_cntr;
	/** Internal; Used by tst_fzsync_pair_exit() and fzsync_pair_wait() */
	int exit;
	/**
	 * The maximum desired execution time as a proportion of the timeout
	 *
	 * A value x so that 0 < x < 1 which decides how long the test should
	 * be run for (assuming the loop limit is not exceeded first).
	 *
	 * Defaults to 0.5 (~150 seconds with default timeout).
	 */
	float exec_time_p;
	/** Internal; The test time remaining on tst_fzsync_pair_reset() */
	float exec_time_start;
	/**
	 * The maximum number of iterations to execute during the test
	 *
	 * Defaults to a large number, but not too large.
	 */
	int exec_loops;
	/** Internal; The current loop index  */
	int exec_loop;
	/** Internal; The second thread or 0 */
	pthread_t thread_b;
};
184 
/*
 * Apply a default to an unset (zero) pair parameter, then bounds-check it.
 * Breaks the test (tst_brk) if the resulting value falls outside [low, hi].
 */
#define CHK(param, low, hi, def) do {					      \
	pair->param = (pair->param ? pair->param : def);		      \
	if (pair->param < low)						      \
		tst_brk(TBROK, #param " is less than the lower bound " #low); \
	if (pair->param > hi)						      \
		tst_brk(TBROK, #param " is more than the upper bound " #hi);  \
	} while (0)
/**
 * Ensures that any Fuzzy Sync parameters are properly set
 *
 * @relates tst_fzsync_pair
 *
 * Usually called from the setup function, it sets default parameter values or
 * validates any existing non-defaults.
 *
 * @sa tst_fzsync_pair_reset()
 */
static void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
{
	CHK(avg_alpha, 0, 1, 0.25);
	CHK(min_samples, 20, INT_MAX, 1024);
	CHK(max_dev_ratio, 0, 1, 0.1);
	CHK(exec_time_p, 0, 1, 0.5);
	CHK(exec_loops, 20, INT_MAX, 3000000);
}
#undef CHK
211 
/**
 * Exit and join thread B if necessary.
 *
 * @relates tst_fzsync_pair
 * @param pair The state structure; thread_b is joined and zeroed if running.
 *
 * Call this from your cleanup function.
 */
static void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
{
	if (pair->thread_b) {
		/* Revoke thread B if parent hits accidental break */
		if (!pair->exit) {
			tst_atomic_store(1, &pair->exit);
			/* Give thread B a chance to observe the exit flag */
			usleep(100000);

/*
 * b/148414662
 * Android does not support pthread_cancel. This mechanism is just used to avoid
 * a timeout in a rare failure case and is not required for proper operation, so
 * just skip it for now (effort will be made upstream to remove the use of
 * pthread_cancel).
 *		pthread_cancel(pair->thread_b);
 */
		}
		SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
		pair->thread_b = 0;
	}
}
240 
/** To store the run_b pointer and pass to tst_fzsync_thread_wrapper */
struct tst_fzsync_run_thread {
	/** Thread B entry function as given to tst_fzsync_pair_reset() */
	void *(*func)(void *);
	/** Argument forwarded to func (currently always NULL, see reset) */
	void *arg;
};
246 
247 /**
248  * Wrap run_b for tst_fzsync_pair_reset to enable pthread cancel
249  * at the start of the thread B.
250  */
tst_fzsync_thread_wrapper(void * run_thread)251 static void *tst_fzsync_thread_wrapper(void *run_thread)
252 {
253        struct tst_fzsync_run_thread t = *(struct tst_fzsync_run_thread *)run_thread;
254 
255 /*
256  * See above comment for b/148414662
257  *     pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
258  *     pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
259  */
260        return t.func(t.arg);
261 }
262 
263 /**
264  * Zero some stat fields
265  *
266  * @relates tst_fzsync_stat
267  */
tst_init_stat(struct tst_fzsync_stat * s)268 static void tst_init_stat(struct tst_fzsync_stat *s)
269 {
270 	s->avg = 0;
271 	s->avg_dev = 0;
272 }
273 
274 /**
275  * Reset or initialise fzsync.
276  *
277  * @relates tst_fzsync_pair
278  * @param pair The state structure initialised with TST_FZSYNC_PAIR_INIT.
279  * @param run_b The function defining thread B or NULL.
280  *
281  * Call this from your main test function (thread A), just before entering the
282  * main loop. It will (re)set any variables needed by fzsync and (re)start
283  * thread B using the function provided.
284  *
285  * If you need to use fork or clone to start the second thread/process then
286  * you can pass NULL to run_b and handle starting and stopping thread B
287  * yourself. You may need to place tst_fzsync_pair in some shared memory as
288  * well.
289  *
290  * @sa tst_fzsync_pair_init()
291  */
tst_fzsync_pair_reset(struct tst_fzsync_pair * pair,void * (* run_b)(void *))292 static void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
293 				  void *(*run_b)(void *))
294 {
295 	tst_fzsync_pair_cleanup(pair);
296 
297 	tst_init_stat(&pair->diff_ss);
298 	tst_init_stat(&pair->diff_sa);
299 	tst_init_stat(&pair->diff_sb);
300 	tst_init_stat(&pair->diff_ab);
301 	tst_init_stat(&pair->spins_avg);
302 	pair->delay = 0;
303 	pair->sampling = pair->min_samples;
304 
305 	pair->exec_loop = 0;
306 
307 	pair->a_cntr = 0;
308 	pair->b_cntr = 0;
309 	pair->exit = 0;
310 	if (run_b) {
311 		struct tst_fzsync_run_thread wrap_run_b = {.func = run_b, .arg = NULL};
312 		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, tst_fzsync_thread_wrapper, &wrap_run_b);
313 	}
314 
315 	pair->exec_time_start = (float)tst_timeout_remaining();
316 }
317 
/**
 * Print stat
 *
 * @relates tst_fzsync_stat
 * @param stat The statistic to print (passed by value)
 * @param unit Unit suffix appended to avg and avg_dev (e.g. "ns")
 * @param name Label printed in front of the values
 */
static inline void tst_fzsync_stat_info(struct tst_fzsync_stat stat,
					char *unit, char *name)
{
	/* Positional (%n$) conversions allow the unit argument to be
	 * printed twice from a single vararg */
	tst_res(TINFO,
		"%1$-17s: { avg = %3$5.0f%2$s, avg_dev = %4$5.0f%2$s, dev_ratio = %5$.2f }",
		name, unit, stat.avg, stat.avg_dev, stat.dev_ratio);
}
330 
/**
 * Print some synchronisation statistics
 *
 * @relates tst_fzsync_pair
 * @param pair The state structure whose statistics are printed
 */
static void tst_fzsync_pair_info(struct tst_fzsync_pair *pair)
{
	tst_res(TINFO, "loop = %d, delay_bias = %d",
		pair->exec_loop, pair->delay_bias);
	tst_fzsync_stat_info(pair->diff_ss, "ns", "start_a - start_b");
	tst_fzsync_stat_info(pair->diff_sa, "ns", "end_a - start_a");
	tst_fzsync_stat_info(pair->diff_sb, "ns", "end_b - start_b");
	tst_fzsync_stat_info(pair->diff_ab, "ns", "end_a - end_b");
	tst_fzsync_stat_info(pair->spins_avg, "  ", "spins");
}
346 
347 /** Wraps clock_gettime */
tst_fzsync_time(struct timespec * t)348 static inline void tst_fzsync_time(struct timespec *t)
349 {
350 #ifdef CLOCK_MONOTONIC_RAW
351 	clock_gettime(CLOCK_MONOTONIC_RAW, t);
352 #else
353 	clock_gettime(CLOCK_MONOTONIC, t);
354 #endif
355 }
356 
/**
 * Exponential moving average
 *
 * @param alpha The preference for recent samples over old ones.
 * @param sample The current sample
 * @param prev_avg The average of the all the previous samples
 *
 * @return The average including the current sample.
 */
static inline float tst_exp_moving_avg(float alpha,
					float sample,
					float prev_avg)
{
	/* Weight given to the newest sample */
	const double weighted_sample = alpha * sample;
	/* Weight given to the history accumulated so far */
	const double weighted_history = (1.0 - alpha) * prev_avg;

	return weighted_sample + weighted_history;
}
372 
373 /**
374  * Update a stat with a new sample
375  *
376  * @relates tst_fzsync_stat
377  */
tst_upd_stat(struct tst_fzsync_stat * s,float alpha,float sample)378 static inline void tst_upd_stat(struct tst_fzsync_stat *s,
379 				 float alpha,
380 				 float sample)
381 {
382 	s->avg = tst_exp_moving_avg(alpha, sample, s->avg);
383 	s->avg_dev = tst_exp_moving_avg(alpha,
384 					fabs(s->avg - sample), s->avg_dev);
385 	s->dev_ratio = fabs(s->avg ? s->avg_dev / s->avg : 0);
386 }
387 
/**
 * Update a stat with a new diff sample
 *
 * @relates tst_fzsync_stat
 * @param s The statistic to update
 * @param alpha The exponential moving average weight
 * @param t1 One end of the measured interval
 * @param t2 The other end of the measured interval
 *
 * The sample folded in is the difference between t1 and t2 as returned by
 * tst_timespec_diff_ns().
 */
static inline void tst_upd_diff_stat(struct tst_fzsync_stat *s,
				     float alpha,
				     struct timespec t1,
				     struct timespec t2)
{
	tst_upd_stat(s, alpha, tst_timespec_diff_ns(t1, t2));
}
400 
401 /**
402  * Calculate various statistics and the delay
403  *
404  * This function helps create the fuzz in fuzzy sync. Imagine we have the
405  * following timelines in threads A and B:
406  *
407  *  start_race_a
408  *      ^                    end_race_a (a)
409  *      |                        ^
410  *      |                        |
411  *  - --+------------------------+-- - -
412  *      |        Syscall A       |                 Thread A
413  *  - --+------------------------+-- - -
414  *  - --+----------------+-------+-- - -
415  *      |   Syscall B    | spin  |                 Thread B
416  *  - --+----------------+-------+-- - -
417  *      |                |
418  *      ^                ^
419  *  start_race_b     end_race_b
420  *
421  * Here we have synchronised the calls to syscall A and B with start_race_{a,
422  * b} so that they happen at approximately the same time in threads A and
423  * B. If the race condition occurs during the entry code for these two
424  * functions then we will quickly hit it. If it occurs during the exit code of
425  * B and mid way through A, then we will quickly hit it.
426  *
427  * However if the exit paths of A and B need to be aligned and (end_race_a -
428  * end_race_b) is large relative to the variation in call times, the
429  * probability of hitting the race condition is close to zero. To solve this
430  * scenario (and others) a randomised delay is introduced before the syscalls
431  * in A and B. Given enough time the following should happen where the exit
432  * paths are now synchronised:
433  *
434  *  start_race_a
435  *      ^                    end_race_a (a)
436  *      |                        ^
437  *      |                        |
438  *  - --+------------------------+-- - -
439  *      |        Syscall A       |                 Thread A
440  *  - --+------------------------+-- - -
441  *  - --+-------+----------------+-- - -
442  *      | delay |   Syscall B    |                 Thread B
443  *  - --+-------+----------------+-- - -
444  *      |                        |
445  *      ^                        ^
446  *  start_race_b             end_race_b
447  *
448  * The delay is not introduced immediately and the delay range is only
449  * calculated once the average relative deviation has dropped below some
450  * percentage of the total time.
451  *
452  * The delay range is chosen so that any point in Syscall A could be
453  * synchronised with any point in Syscall B using a value from the
454  * range. Because the delay range may be too large for a linear search, we use
455  * an evenly distributed random function to pick a value from it.
456  *
457  * The delay range goes from positive to negative. A negative delay will delay
458  * thread A and a positive one will delay thread B. The range is bounded by
459  * the point where the entry code to Syscall A is synchronised with the exit
460  * to Syscall B and the entry code to Syscall B is synchronised with the exit
461  * of A.
462  *
463  * In order to calculate the lower bound (the max delay of A) we can simply
464  * negate the execution time of Syscall B and convert it to a spin count. For
465  * the upper bound (the max delay of B), we just take the execution time of A
466  * and convert it to a spin count.
467  *
468  * In order to calculate spin count we need to know approximately how long a
469  * spin takes and divide the delay time with it. We find this by first
470  * counting how many spins one thread spends waiting for the other during
471  * end_race[1]. We also know when each syscall exits so we can take the
472  * difference between the exit times and divide it with the number of spins
473  * spent waiting.
474  *
475  * All the times and counts we use in the calculation are averaged over a
476  * variable number of iterations. There is an initial sampling period where we
477  * simply collect time and count samples then calculate their averages. When a
478  * minimum number of samples have been collected, and if the average deviation
479  * is below some proportion of the average sample magnitude, then the sampling
480  * period is ended. On all further iterations a random delay is calculated and
481  * applied, but the averages are not updated.
482  *
483  * [1] This assumes there is always a significant difference. The algorithm
484  * may fail to introduce a delay (when one is needed) in situations where
485  * Syscall A and B finish at approximately the same time.
486  *
487  * @relates tst_fzsync_pair
488  */
static void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
{
	float alpha = pair->avg_alpha;
	float per_spin_time, time_delay;
	float max_dev = pair->max_dev_ratio;
	int over_max_dev;

	/* Start from any manually requested bias, see
	 * tst_fzsync_pair_add_bias() */
	pair->delay = pair->delay_bias;

	/* True while at least one statistic is still too noisy to trust */
	over_max_dev = pair->diff_ss.dev_ratio > max_dev
		|| pair->diff_sa.dev_ratio > max_dev
		|| pair->diff_sb.dev_ratio > max_dev
		|| pair->diff_ab.dev_ratio > max_dev
		|| pair->spins_avg.dev_ratio > max_dev;

	if (pair->sampling > 0 || over_max_dev) {
		/* Sampling phase: fold the last iteration's timings into
		 * the exponential moving averages */
		tst_upd_diff_stat(&pair->diff_ss, alpha,
				  pair->a_start, pair->b_start);
		tst_upd_diff_stat(&pair->diff_sa, alpha,
				  pair->a_end, pair->a_start);
		tst_upd_diff_stat(&pair->diff_sb, alpha,
				  pair->b_end, pair->b_start);
		tst_upd_diff_stat(&pair->diff_ab, alpha,
				  pair->a_end, pair->b_end);
		tst_upd_stat(&pair->spins_avg, alpha, pair->spins);
		if (pair->sampling > 0 && --pair->sampling == 0) {
			tst_res(TINFO, "Minimum sampling period ended");
			tst_fzsync_pair_info(pair);
		}
	} else if (fabsf(pair->diff_ab.avg) >= 1) {
		/* Delay phase: draw a uniformly random delay from the range
		 * described in the comment above */
		per_spin_time = fabsf(pair->diff_ab.avg) / MAX(pair->spins_avg.avg, 1.0f);
		time_delay = drand48() * (pair->diff_sa.avg + pair->diff_sb.avg)
			- pair->diff_sb.avg;
		pair->delay += (int)(time_delay / per_spin_time);

		if (!pair->sampling) {
			/* First pass after sampling ended: report the range
			 * and mark the transition with sampling = -1 */
			tst_res(TINFO,
				"Reached deviation ratios < %.2f, introducing randomness",
				pair->max_dev_ratio);
			tst_res(TINFO, "Delay range is [-%d, %d]",
				(int)(pair->diff_sb.avg / per_spin_time) + pair->delay_bias,
				(int)(pair->diff_sa.avg / per_spin_time) - pair->delay_bias);
			tst_fzsync_pair_info(pair);
			pair->sampling = -1;
		}
	} else if (!pair->sampling) {
		/* end times are (nearly) equal; see note [1] above */
		tst_res(TWARN, "Can't calculate random delay");
		tst_fzsync_pair_info(pair);
		pair->sampling = -1;
	}

	pair->spins = 0;
}
542 
543 /**
544  * Wait for the other thread
545  *
546  * @relates tst_fzsync_pair
547  * @param our_cntr The counter for the thread we are on
548  * @param other_cntr The counter for the thread we are synchronising with
549  * @param spins A pointer to the spin counter or NULL
550  *
551  * Used by tst_fzsync_pair_wait_a(), tst_fzsync_pair_wait_b(),
552  * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
553  * thread, then it will spin wait. Unlike pthread_barrier_wait it will never
554  * use futex and can count the number of spins spent waiting.
555  *
556  * Returns once both threads have reached the synchronisation point; note
557  * that this function itself has no return value.
558  */
static inline void tst_fzsync_pair_wait(int *our_cntr,
					int *other_cntr,
					int *spins)
{
	/* Each thread increments the *other* thread's counter; INT_MAX
	 * signals that the counters are about to wrap and be reset */
	if (tst_atomic_inc(other_cntr) == INT_MAX) {
		/*
		 * We are about to break the invariant that the thread with
		 * the lowest count is in front of the other. So we must wait
		 * here to ensure the other thread has at least reached the
		 * line above before doing that. If we are in rear position
		 * then our counter may already have been set to zero.
		 */
		while (tst_atomic_load(our_cntr) > 0
		       && tst_atomic_load(our_cntr) < INT_MAX) {
			if (spins)
				(*spins)++;
		}

		/* Restart the other thread's count from zero */
		tst_atomic_store(0, other_cntr);
		/*
		 * Once both counters have been set to zero the invariant
		 * is restored and we can continue.
		 */
		while (tst_atomic_load(our_cntr) > 1)
			;
	} else {
		/*
		 * If our counter is less than the other thread's we are ahead
		 * of it and need to wait.
		 */
		while (tst_atomic_load(our_cntr) < tst_atomic_load(other_cntr)) {
			if (spins)
				(*spins)++;
		}
	}
}
595 
/**
 * Wait in thread A
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_pair_wait
 */
static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
{
	/* NULL: spins are only counted inside the end_race functions */
	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, NULL);
}
606 
/**
 * Wait in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_pair_wait
 */
static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
{
	/* NULL: spins are only counted inside the end_race functions */
	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, NULL);
}
617 
618 /**
619  * Decide whether to continue running thread A
620  *
621  * @relates tst_fzsync_pair
622  *
623  * Checks some values and decides whether it is time to break the loop of
624  * thread A.
625  *
626  * @return True to continue and false to break.
627  * @sa tst_fzsync_run_b
628  */
tst_fzsync_run_a(struct tst_fzsync_pair * pair)629 static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
630 {
631 	int exit = 0;
632 	float rem_p = 1 - tst_timeout_remaining() / pair->exec_time_start;
633 
634 	if ((pair->exec_time_p * SAMPLING_SLICE < rem_p)
635 		&& (pair->sampling > 0)) {
636 		tst_res(TINFO, "Stopped sampling at %d (out of %d) samples, "
637 			"sampling time reached 50%% of the total time limit",
638 			pair->exec_loop, pair->min_samples);
639 		pair->sampling = 0;
640 		tst_fzsync_pair_info(pair);
641 	}
642 
643 	if (pair->exec_time_p < rem_p) {
644 		tst_res(TINFO,
645 			"Exceeded execution time, requesting exit");
646 		exit = 1;
647 	}
648 
649 	if (++pair->exec_loop > pair->exec_loops) {
650 		tst_res(TINFO,
651 			"Exceeded execution loops, requesting exit");
652 		exit = 1;
653 	}
654 
655 	tst_atomic_store(exit, &pair->exit);
656 	tst_fzsync_wait_a(pair);
657 
658 	if (exit) {
659 		tst_fzsync_pair_cleanup(pair);
660 		return 0;
661 	}
662 
663 	return 1;
664 }
665 
/**
 * Decide whether to continue running thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_run_a
 */
static inline int tst_fzsync_run_b(struct tst_fzsync_pair *pair)
{
	/* Rendezvous first: thread A stores its exit decision into
	 * pair->exit before entering its matching wait */
	tst_fzsync_wait_b(pair);
	return !tst_atomic_load(&pair->exit);
}
677 
/**
 * Marks the start of a race region in thread A
 *
 * @relates tst_fzsync_pair
 *
 * This should be placed just before performing whatever action can cause a
 * race condition. Usually it is placed just before a syscall and
 * tst_fzsync_end_race_a() is placed just afterwards.
 *
 * A corresponding call to tst_fzsync_start_race_b() should be made in thread
 * B.
 *
 * @sa tst_fzsync_pair_update
 */
static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
{
	volatile int delay;

	/* Only thread A updates the statistics and computes the delay */
	tst_fzsync_pair_update(pair);

	tst_fzsync_wait_a(pair);

	/* A negative delay is burned by thread A; volatile prevents the
	 * compiler from eliminating the busy loop */
	delay = pair->delay;
	while (delay < 0)
		delay++;

	tst_fzsync_time(&pair->a_start);
}
710 
/**
 * Marks the end of a race region in thread A
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 */
static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
{
	tst_fzsync_time(&pair->a_end);
	/* Count spins spent waiting; used to estimate the per-spin time */
	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr, &pair->spins);
}
722 
/**
 * Marks the start of a race region in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 */
static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
{
	volatile int delay;

	tst_fzsync_wait_b(pair);

	/* A positive delay is burned by thread B; volatile prevents the
	 * compiler from eliminating the busy loop */
	delay = pair->delay;
	while (delay > 0)
		delay--;

	tst_fzsync_time(&pair->b_start);
}
741 
/**
 * Marks the end of a race region in thread B
 *
 * @relates tst_fzsync_pair
 * @sa tst_fzsync_start_race_a
 */
static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
{
	tst_fzsync_time(&pair->b_end);
	/* Count spins spent waiting; used to estimate the per-spin time */
	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr, &pair->spins);
}
753 
754 /**
755  * Add some amount to the delay bias
756  *
757  * @relates tst_fzsync_pair
758  * @param change The amount to add, can be negative
759  *
760  * A positive change delays thread B and a negative one delays thread
761  * A.
762  *
763  * It is intended to be used in tests where the time taken by syscall A and/or
764  * B are significantly affected by their chronological order. To the extent
765  * that the delay range will not include the correct values if too many of the
766  * initial samples are taken when the syscalls (or operations within the
767  * syscalls) happen in the wrong order.
768  *
769  * An example of this is cve/cve-2016-7117.c where a call to close() is racing
770  * with a call to recvmmsg(). If close() happens before recvmmsg() has chance
771  * to check if the file descriptor is open then recvmmsg() completes very
772  * quickly. If the call to close() happens once recvmmsg() has already checked
773  * the descriptor it takes much longer. The sample where recvmmsg() completes
774  * quickly is essentially invalid for our purposes. The test uses the simple
775  * heuristic of whether recvmmsg() returns EBADF, to decide if it should call
776  * tst_fzsync_pair_add_bias() to further delay syscall B.
777  */
tst_fzsync_pair_add_bias(struct tst_fzsync_pair * pair,int change)778 static inline void tst_fzsync_pair_add_bias(struct tst_fzsync_pair *pair, int change)
779 {
780 	if (pair->sampling > 0)
781 		pair->delay_bias += change;
782 }
783 
784 #endif /* TST_FUZZY_SYNC_H__ */
785