• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 /*
3  * Copyright (c) 2017-2018 Richard Palethorpe <rpalethorpe@suse.com>
4  */
5 /**
6  * @file tst_fuzzy_sync.h
7  * Fuzzy Synchronisation - abbreviated to fzsync
8  *
9  * This library is intended to help reproduce race conditions by synchronising
10  * two threads at a given place by marking the range a race may occur
11  * in. Because the exact place where any race occurs is within the kernel,
12  * and therefore impossible to mark accurately, the library may add randomised
13  * delays to either thread in order to help find the exact race timing.
14  *
15  * Currently only two way races are explicitly supported, that is races
16  * involving two threads or processes. We refer to the main test thread as
17  * thread A and the child thread as thread B.
18  *
19  * In each thread you need a simple while- or for-loop which the tst_fzsync_*
20  * functions are called in. In the simplest case thread A will look something
21  * like:
22  *
23  * tst_fzsync_pair_reset(&pair, run_thread_b);
24  * while (tst_fzsync_run_a(&pair)) {
25  *	// Perform some setup which must happen before the race
26  *	tst_fzsync_start_race_a(&pair);
27  *	// Do some dodgy syscall
28  *	tst_fzsync_end_race_a(&pair);
29  * }
30  *
31  * Then in thread B (run_thread_b):
32  *
33  * while (tst_fzsync_run_b(&pair)) {
34  *	tst_fzsync_start_race_b(&pair);
35  *	// Do something which can race with the dodgy syscall in A
36  *	tst_fzsync_end_race_b(&pair)
37  * }
38  *
39  * The calls to tst_fzsync_start/end_race and tst_fzsync_run_a/b block (at
 * least) until both threads have entered them. These functions can only be
41  * called once for each iteration, but further synchronisation points can be
42  * added by calling tst_fzsync_wait_a() and tst_fzsync_wait_b() in each
43  * thread.
44  *
45  * The execution of the loops in threads A and B are bounded by both iteration
46  * count and time. A slow machine is likely to be limited by time and a fast
47  * one by iteration count. The user can use the -i parameter to run the test
48  * multiple times or LTP_TIMEOUT_MUL to give the test more time.
49  *
50  * It is possible to use the library just for tst_fzsync_pair_wait() to get a
51  * basic spin wait. However if you are actually testing a race condition then
52  * it is recommended to use tst_fzsync_start_race_a/b even if the
53  * randomisation is not needed. It provides some semantic information which
54  * may be useful in the future.
55  *
56  * For a usage example see testcases/cve/cve-2016-7117.c or just run
57  * 'git grep tst_fuzzy_sync.h'
58  *
59  * @sa tst_fzsync_pair
60  */
61 
62 #include <math.h>
63 #include <stdbool.h>
64 #include <stdlib.h>
65 #include <sys/time.h>
66 #include <time.h>
67 #include "tst_atomic.h"
68 #include "tst_cpu.h"
69 #include "tst_timer.h"
70 #include "tst_safe_pthread.h"
71 
72 #ifndef TST_FUZZY_SYNC_H__
73 #define TST_FUZZY_SYNC_H__
74 
75 /* how much of exec time is sampling allowed to take */
76 #define SAMPLING_SLICE 0.5f
77 
78 /** Some statistics for a variable */
79 struct tst_fzsync_stat {
80 	float avg;
81 	float avg_dev;
82 	float dev_ratio;
83 };
84 
85 /**
86  * The state of a two way synchronisation or race.
87  *
88  * This contains all the necessary state for approximately synchronising two
89  * sections of code in different threads.
90  *
91  * Some of the fields can be configured before calling
92  * tst_fzsync_pair_reset(), however this is mainly for debugging purposes. If
93  * a test requires one of the parameters to be modified, we should consider
94  * finding a way of automatically selecting an appropriate value at runtime.
95  *
96  * Internal fields should only be accessed by library functions.
97  */
98 struct tst_fzsync_pair {
99 	/**
100 	 * The rate at which old diff samples are forgotten
101 	 *
102 	 * Defaults to 0.25.
103 	 */
104 	float avg_alpha;
105 	/** Internal; Thread A start time */
106 	struct timespec a_start;
107 	/** Internal; Thread B start time */
108 	struct timespec b_start;
109 	/** Internal; Thread A end time */
110 	struct timespec a_end;
111 	/** Internal; Thread B end time */
112 	struct timespec b_end;
113 	/** Internal; Avg. difference between a_start and b_start */
114 	struct tst_fzsync_stat diff_ss;
115 	/** Internal; Avg. difference between a_start and a_end */
116 	struct tst_fzsync_stat diff_sa;
117 	/** Internal; Avg. difference between b_start and b_end */
118 	struct tst_fzsync_stat diff_sb;
119 	/** Internal; Avg. difference between a_end and b_end */
120 	struct tst_fzsync_stat diff_ab;
121 	/** Internal; Number of spins while waiting for the slower thread */
122 	int spins;
123 	struct tst_fzsync_stat spins_avg;
124 	/**
125 	 * Internal; Number of spins to use in the delay.
126 	 *
127 	 * A negative value delays thread A and a positive delays thread B.
128 	 */
129 	int delay;
130 	int delay_bias;
131 	/**
132 	 *  Internal; The number of samples left or the sampling state.
133 	 *
134 	 *  A positive value is the number of remaining mandatory
135 	 *  samples. Zero or a negative indicate some other state.
136 	 */
137 	int sampling;
138 	/**
139 	 * The Minimum number of statistical samples which must be collected.
140 	 *
141 	 * The minimum number of iterations which must be performed before a
142 	 * random delay can be calculated. Defaults to 1024.
143 	 */
144 	int min_samples;
145 	/**
146 	 * The maximum allowed proportional average deviation.
147 	 *
148 	 * A value in the range (0, 1) which gives the maximum average
149 	 * deviation which must be attained before random delays can be
150 	 * calculated.
151 	 *
152 	 * It is a ratio of (average_deviation / total_time). The default is
153 	 * 0.1, so this allows an average deviation of at most 10%.
154 	 */
155 	float max_dev_ratio;
156 
157 	/** Internal; Atomic counter used by fzsync_pair_wait() */
158 	int a_cntr;
159 	/** Internal; Atomic counter used by fzsync_pair_wait() */
160 	int b_cntr;
161 	/** Internal; Used by tst_fzsync_pair_exit() and fzsync_pair_wait() */
162 	int exit;
163 	/** Internal; The test time remaining on tst_fzsync_pair_reset() */
164 	float exec_time_start;
165 	/**
166 	 * The maximum number of iterations to execute during the test
167 	 *
168 	 * Defaults to a large number, but not too large.
169 	 */
170 	int exec_loops;
171 	/** Internal; The current loop index  */
172 	int exec_loop;
173 	/** Internal; The second thread or 0 */
174 	pthread_t thread_b;
175 	/**
176 	 * The flag indicates single core machines or not
177 	 *
178 	 * If running on single core machines, it would take considerable
179 	 * amount of time to run fuzzy sync library.
180 	 * Thus call sched_yield to give up cpu to decrease the test time.
181 	 */
182 	bool yield_in_wait;
183 
184 };
185 
186 #define CHK(param, low, hi, def) do {					      \
187 	pair->param = (pair->param ? pair->param : def);		      \
188 	if (pair->param < low)						      \
189 		tst_brk(TBROK, #param " is less than the lower bound " #low); \
190 	if (pair->param > hi)						      \
191 		tst_brk(TBROK, #param " is more than the upper bound " #hi);  \
192 	} while (0)
193 /**
194  * Ensures that any Fuzzy Sync parameters are properly set
195  *
196  * @relates tst_fzsync_pair
197  *
198  * Usually called from the setup function, it sets default parameter values or
199  * validates any existing non-defaults.
200  *
201  * @sa tst_fzsync_pair_reset()
202  */
tst_fzsync_pair_init(struct tst_fzsync_pair * pair)203 static inline void tst_fzsync_pair_init(struct tst_fzsync_pair *pair)
204 {
205 	CHK(avg_alpha, 0, 1, 0.25);
206 	CHK(min_samples, 20, INT_MAX, 1024);
207 	CHK(max_dev_ratio, 0, 1, 0.1);
208 	CHK(exec_loops, 20, INT_MAX, 3000000);
209 
210 	if (tst_ncpus_available() <= 1)
211 		pair->yield_in_wait = 1;
212 }
213 #undef CHK
214 
215 /**
216  * Exit and join thread B if necessary.
217  *
218  * @relates tst_fzsync_pair
219  *
220  * Call this from your cleanup function.
221  */
tst_fzsync_pair_cleanup(struct tst_fzsync_pair * pair)222 static inline void tst_fzsync_pair_cleanup(struct tst_fzsync_pair *pair)
223 {
224 	if (pair->thread_b) {
225 		/* Revoke thread B if parent hits accidental break */
226 		if (!pair->exit)
227 			tst_atomic_store(1, &pair->exit);
228 		SAFE_PTHREAD_JOIN(pair->thread_b, NULL);
229 		pair->thread_b = 0;
230 	}
231 }
232 
233 /**
234  * Zero some stat fields
235  *
236  * @relates tst_fzsync_stat
237  */
tst_init_stat(struct tst_fzsync_stat * s)238 static inline void tst_init_stat(struct tst_fzsync_stat *s)
239 {
240 	s->avg = 0;
241 	s->avg_dev = 0;
242 }
243 
244 /**
245  * Reset or initialise fzsync.
246  *
247  * @relates tst_fzsync_pair
248  * @param pair The state structure initialised with TST_FZSYNC_PAIR_INIT.
249  * @param run_b The function defining thread B or NULL.
250  *
251  * Call this from your main test function (thread A), just before entering the
252  * main loop. It will (re)set any variables needed by fzsync and (re)start
253  * thread B using the function provided.
254  *
255  * If you need to use fork or clone to start the second thread/process then
256  * you can pass NULL to run_b and handle starting and stopping thread B
257  * yourself. You may need to place tst_fzsync_pair in some shared memory as
258  * well.
259  *
260  * @sa tst_fzsync_pair_init()
261  */
tst_fzsync_pair_reset(struct tst_fzsync_pair * pair,void * (* run_b)(void *))262 static inline void tst_fzsync_pair_reset(struct tst_fzsync_pair *pair,
263 				  void *(*run_b)(void *))
264 {
265 	tst_fzsync_pair_cleanup(pair);
266 
267 	tst_init_stat(&pair->diff_ss);
268 	tst_init_stat(&pair->diff_sa);
269 	tst_init_stat(&pair->diff_sb);
270 	tst_init_stat(&pair->diff_ab);
271 	tst_init_stat(&pair->spins_avg);
272 	pair->delay = 0;
273 	pair->delay_bias = 0;
274 	pair->sampling = pair->min_samples;
275 
276 	pair->exec_loop = 0;
277 
278 	pair->a_cntr = 0;
279 	pair->b_cntr = 0;
280 	pair->exit = 0;
281 	if (run_b)
282 		SAFE_PTHREAD_CREATE(&pair->thread_b, 0, run_b, 0);
283 
284 	pair->exec_time_start = (float)tst_remaining_runtime();
285 }
286 
287 /**
288  * Print stat
289  *
290  * @relates tst_fzsync_stat
291  */
tst_fzsync_stat_info(struct tst_fzsync_stat stat,char * unit,char * name)292 static inline void tst_fzsync_stat_info(struct tst_fzsync_stat stat,
293 					char *unit, char *name)
294 {
295 	tst_res(TINFO,
296 		"%1$-17s: { avg = %3$5.0f%2$s, avg_dev = %4$5.0f%2$s, dev_ratio = %5$.2f }",
297 		name, unit, stat.avg, stat.avg_dev, stat.dev_ratio);
298 }
299 
300 /**
301  * Print some synchronisation statistics
302  *
303  * @relates tst_fzsync_pair
304  */
tst_fzsync_pair_info(struct tst_fzsync_pair * pair)305 static inline void tst_fzsync_pair_info(struct tst_fzsync_pair *pair)
306 {
307 	tst_res(TINFO, "loop = %d, delay_bias = %d",
308 		pair->exec_loop, pair->delay_bias);
309 	tst_fzsync_stat_info(pair->diff_ss, "ns", "start_a - start_b");
310 	tst_fzsync_stat_info(pair->diff_sa, "ns", "end_a - start_a");
311 	tst_fzsync_stat_info(pair->diff_sb, "ns", "end_b - start_b");
312 	tst_fzsync_stat_info(pair->diff_ab, "ns", "end_a - end_b");
313 	tst_fzsync_stat_info(pair->spins_avg, "  ", "spins");
314 }
315 
316 /** Wraps clock_gettime */
tst_fzsync_time(struct timespec * t)317 static inline void tst_fzsync_time(struct timespec *t)
318 {
319 #ifdef CLOCK_MONOTONIC_RAW
320 	clock_gettime(CLOCK_MONOTONIC_RAW, t);
321 #else
322 	clock_gettime(CLOCK_MONOTONIC, t);
323 #endif
324 }
325 
326 /**
327  * Exponential moving average
328  *
329  * @param alpha The preference for recent samples over old ones.
330  * @param sample The current sample
331  * @param prev_avg The average of the all the previous samples
332  *
333  * @return The average including the current sample.
334  */
tst_exp_moving_avg(float alpha,float sample,float prev_avg)335 static inline float tst_exp_moving_avg(float alpha,
336 					float sample,
337 					float prev_avg)
338 {
339 	return alpha * sample + (1.0 - alpha) * prev_avg;
340 }
341 
342 /**
343  * Update a stat with a new sample
344  *
345  * @relates tst_fzsync_stat
346  */
tst_upd_stat(struct tst_fzsync_stat * s,float alpha,float sample)347 static inline void tst_upd_stat(struct tst_fzsync_stat *s,
348 				 float alpha,
349 				 float sample)
350 {
351 	s->avg = tst_exp_moving_avg(alpha, sample, s->avg);
352 	s->avg_dev = tst_exp_moving_avg(alpha,
353 					fabs(s->avg - sample), s->avg_dev);
354 	s->dev_ratio = fabs(s->avg ? s->avg_dev / s->avg : 0);
355 }
356 
357 /**
358  * Update a stat with a new diff sample
359  *
360  * @relates tst_fzsync_stat
361  */
tst_upd_diff_stat(struct tst_fzsync_stat * s,float alpha,struct timespec t1,struct timespec t2)362 static inline void tst_upd_diff_stat(struct tst_fzsync_stat *s,
363 				     float alpha,
364 				     struct timespec t1,
365 				     struct timespec t2)
366 {
367 	tst_upd_stat(s, alpha, tst_timespec_diff_ns(t1, t2));
368 }
369 
370 /**
371  * Calculate various statistics and the delay
372  *
373  * This function helps create the fuzz in fuzzy sync. Imagine we have the
374  * following timelines in threads A and B:
375  *
376  *  start_race_a
377  *      ^                    end_race_a (a)
378  *      |                        ^
379  *      |                        |
380  *  - --+------------------------+-- - -
381  *      |        Syscall A       |                 Thread A
382  *  - --+------------------------+-- - -
383  *  - --+----------------+-------+-- - -
384  *      |   Syscall B    | spin  |                 Thread B
385  *  - --+----------------+-------+-- - -
386  *      |                |
387  *      ^                ^
388  *  start_race_b     end_race_b
389  *
390  * Here we have synchronised the calls to syscall A and B with start_race_{a,
391  * b} so that they happen at approximately the same time in threads A and
392  * B. If the race condition occurs during the entry code for these two
393  * functions then we will quickly hit it. If it occurs during the exit code of
394  * B and mid way through A, then we will quickly hit it.
395  *
396  * However if the exit paths of A and B need to be aligned and (end_race_a -
397  * end_race_b) is large relative to the variation in call times, the
398  * probability of hitting the race condition is close to zero. To solve this
399  * scenario (and others) a randomised delay is introduced before the syscalls
400  * in A and B. Given enough time the following should happen where the exit
401  * paths are now synchronised:
402  *
403  *  start_race_a
404  *      ^                    end_race_a (a)
405  *      |                        ^
406  *      |                        |
407  *  - --+------------------------+-- - -
408  *      |        Syscall A       |                 Thread A
409  *  - --+------------------------+-- - -
410  *  - --+-------+----------------+-- - -
411  *      | delay |   Syscall B    |                 Thread B
412  *  - --+-------+----------------+-- - -
413  *      |                        |
414  *      ^                        ^
415  *  start_race_b             end_race_b
416  *
417  * The delay is not introduced immediately and the delay range is only
418  * calculated once the average relative deviation has dropped below some
419  * percentage of the total time.
420  *
421  * The delay range is chosen so that any point in Syscall A could be
422  * synchronised with any point in Syscall B using a value from the
423  * range. Because the delay range may be too large for a linear search, we use
424  * an evenly distributed random function to pick a value from it.
425  *
426  * The delay range goes from positive to negative. A negative delay will delay
427  * thread A and a positive one will delay thread B. The range is bounded by
428  * the point where the entry code to Syscall A is synchronised with the exit
429  * to Syscall B and the entry code to Syscall B is synchronised with the exit
430  * of A.
431  *
432  * In order to calculate the lower bound (the max delay of A) we can simply
433  * negate the execution time of Syscall B and convert it to a spin count. For
434  * the upper bound (the max delay of B), we just take the execution time of A
435  * and convert it to a spin count.
436  *
437  * In order to calculate spin count we need to know approximately how long a
438  * spin takes and divide the delay time with it. We find this by first
439  * counting how many spins one thread spends waiting for the other during
440  * end_race[1]. We also know when each syscall exits so we can take the
441  * difference between the exit times and divide it with the number of spins
442  * spent waiting.
443  *
444  * All the times and counts we use in the calculation are averaged over a
445  * variable number of iterations. There is an initial sampling period where we
446  * simply collect time and count samples then calculate their averages. When a
447  * minimum number of samples have been collected, and if the average deviation
448  * is below some proportion of the average sample magnitude, then the sampling
449  * period is ended. On all further iterations a random delay is calculated and
450  * applied, but the averages are not updated.
451  *
452  * [1] This assumes there is always a significant difference. The algorithm
453  * may fail to introduce a delay (when one is needed) in situations where
454  * Syscall A and B finish at approximately the same time.
455  *
456  * @relates tst_fzsync_pair
457  */
tst_fzsync_pair_update(struct tst_fzsync_pair * pair)458 static inline void tst_fzsync_pair_update(struct tst_fzsync_pair *pair)
459 {
460 	float alpha = pair->avg_alpha;
461 	float per_spin_time, time_delay;
462 	float max_dev = pair->max_dev_ratio;
463 	int over_max_dev;
464 
465 	pair->delay = pair->delay_bias;
466 
467 	over_max_dev = pair->diff_ss.dev_ratio > max_dev
468 		|| pair->diff_sa.dev_ratio > max_dev
469 		|| pair->diff_sb.dev_ratio > max_dev
470 		|| pair->diff_ab.dev_ratio > max_dev
471 		|| pair->spins_avg.dev_ratio > max_dev;
472 
473 	if (pair->sampling > 0 || over_max_dev) {
474 		tst_upd_diff_stat(&pair->diff_ss, alpha,
475 				  pair->a_start, pair->b_start);
476 		tst_upd_diff_stat(&pair->diff_sa, alpha,
477 				  pair->a_end, pair->a_start);
478 		tst_upd_diff_stat(&pair->diff_sb, alpha,
479 				  pair->b_end, pair->b_start);
480 		tst_upd_diff_stat(&pair->diff_ab, alpha,
481 				  pair->a_end, pair->b_end);
482 		tst_upd_stat(&pair->spins_avg, alpha, pair->spins);
483 		if (pair->sampling > 0 && --pair->sampling == 0) {
484 			tst_res(TINFO, "Minimum sampling period ended");
485 			tst_fzsync_pair_info(pair);
486 		}
487 	} else if (fabsf(pair->diff_ab.avg) >= 1) {
488 		per_spin_time = fabsf(pair->diff_ab.avg) / MAX(pair->spins_avg.avg, 1.0f);
489 		time_delay = drand48() * (pair->diff_sa.avg + pair->diff_sb.avg)
490 			- pair->diff_sb.avg;
491 		pair->delay += (int)(1.1 * time_delay / per_spin_time);
492 
493 		if (!pair->sampling) {
494 			tst_res(TINFO,
495 				"Reached deviation ratios < %.2f, introducing randomness",
496 				pair->max_dev_ratio);
497 			tst_res(TINFO, "Delay range is [%d, %d]",
498 				-(int)(pair->diff_sb.avg / per_spin_time) + pair->delay_bias,
499 				(int)(pair->diff_sa.avg / per_spin_time) + pair->delay_bias);
500 			tst_fzsync_pair_info(pair);
501 			pair->sampling = -1;
502 		}
503 	} else if (!pair->sampling) {
504 		tst_res(TWARN, "Can't calculate random delay");
505 		tst_fzsync_pair_info(pair);
506 		pair->sampling = -1;
507 	}
508 
509 	pair->spins = 0;
510 }
511 
512 /**
513  * Wait for the other thread
514  *
515  * @relates tst_fzsync_pair
516  * @param our_cntr The counter for the thread we are on
517  * @param other_cntr The counter for the thread we are synchronising with
518  * @param spins A pointer to the spin counter or NULL
519  * @param exit Exit flag when we need to break out of the wait loop
520  *
521  * Used by tst_fzsync_pair_wait_a(), tst_fzsync_pair_wait_b(),
522  * tst_fzsync_start_race_a(), etc. If the calling thread is ahead of the other
523  * thread, then it will spin wait. Unlike pthread_barrier_wait it will never
524  * use futex and can count the number of spins spent waiting.
525  *
526  * @return A non-zero value if the thread should continue otherwise the
527  * calling thread should exit.
528  */
tst_fzsync_pair_wait(int * our_cntr,int * other_cntr,int * spins,int * exit,bool yield_in_wait)529 static inline void tst_fzsync_pair_wait(int *our_cntr,
530 					int *other_cntr,
531 					int *spins,
532 					int *exit,
533 					bool yield_in_wait)
534 {
535 	if (tst_atomic_inc(other_cntr) == INT_MAX) {
536 		/*
537 		 * We are about to break the invariant that the thread with
538 		 * the lowest count is in front of the other. So we must wait
539 		 * here to ensure the other thread has at least reached the
540 		 * line above before doing that. If we are in rear position
541 		 * then our counter may already have been set to zero.
542 		 */
543 		if (yield_in_wait) {
544 			while (tst_atomic_load(our_cntr) > 0
545 			       && tst_atomic_load(our_cntr) < INT_MAX
546 			       && !tst_atomic_load(exit)) {
547 				if (spins)
548 					(*spins)++;
549 
550 				sched_yield();
551 			}
552 		} else {
553 			while (tst_atomic_load(our_cntr) > 0
554 			       && tst_atomic_load(our_cntr) < INT_MAX
555 			       && !tst_atomic_load(exit)) {
556 				if (spins)
557 					(*spins)++;
558 			}
559 		}
560 
561 
562 		tst_atomic_store(0, other_cntr);
563 		/*
564 		 * Once both counters have been set to zero the invariant
565 		 * is restored and we can continue.
566 		 */
567 		if (yield_in_wait) {
568 			while (tst_atomic_load(our_cntr) > 1
569 			       && !tst_atomic_load(exit))
570 				sched_yield();
571 		} else {
572 			while (tst_atomic_load(our_cntr) > 1
573 			       && !tst_atomic_load(exit))
574 				;
575 		}
576 	} else {
577 		/*
578 		 * If our counter is less than the other thread's we are ahead
579 		 * of it and need to wait.
580 		 */
581 		if (yield_in_wait) {
582 			while (tst_atomic_load(our_cntr) <
583 			       tst_atomic_load(other_cntr)
584 			       && !tst_atomic_load(exit)) {
585 				if (spins)
586 					(*spins)++;
587 				sched_yield();
588 			}
589 		} else {
590 			while (tst_atomic_load(our_cntr) <
591 			       tst_atomic_load(other_cntr)
592 			       && !tst_atomic_load(exit)) {
593 				if (spins)
594 					(*spins)++;
595 			}
596 		}
597 	}
598 }
599 
600 /**
601  * Wait in thread A
602  *
603  * @relates tst_fzsync_pair
604  * @sa tst_fzsync_pair_wait
605  */
tst_fzsync_wait_a(struct tst_fzsync_pair * pair)606 static inline void tst_fzsync_wait_a(struct tst_fzsync_pair *pair)
607 {
608 	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
609 			     NULL, &pair->exit, pair->yield_in_wait);
610 }
611 
612 /**
613  * Wait in thread B
614  *
615  * @relates tst_fzsync_pair
616  * @sa tst_fzsync_pair_wait
617  */
tst_fzsync_wait_b(struct tst_fzsync_pair * pair)618 static inline void tst_fzsync_wait_b(struct tst_fzsync_pair *pair)
619 {
620 	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
621 			     NULL, &pair->exit, pair->yield_in_wait);
622 }
623 
624 /**
625  * Decide whether to continue running thread A
626  *
627  * @relates tst_fzsync_pair
628  *
629  * Checks some values and decides whether it is time to break the loop of
630  * thread A.
631  *
632  * @return True to continue and false to break.
633  * @sa tst_fzsync_run_a
634  */
tst_fzsync_run_a(struct tst_fzsync_pair * pair)635 static inline int tst_fzsync_run_a(struct tst_fzsync_pair *pair)
636 {
637 	float rem_p = 1 - tst_remaining_runtime() / pair->exec_time_start;
638 
639 	if ((SAMPLING_SLICE < rem_p) && (pair->sampling > 0)) {
640 		tst_res(TINFO, "Stopped sampling at %d (out of %d) samples, "
641 			"sampling time reached 50%% of the total time limit",
642 			pair->exec_loop, pair->min_samples);
643 		pair->sampling = 0;
644 		tst_fzsync_pair_info(pair);
645 	}
646 
647 	if (rem_p >= 1) {
648 		tst_res(TINFO,
649 			"Exceeded execution time, requesting exit");
650 		tst_atomic_store(1, &pair->exit);
651 	}
652 
653 	if (++pair->exec_loop > pair->exec_loops) {
654 		tst_res(TINFO,
655 			"Exceeded execution loops, requesting exit");
656 		tst_atomic_store(1, &pair->exit);
657 	}
658 
659 	tst_fzsync_wait_a(pair);
660 
661 	if (pair->exit) {
662 		tst_fzsync_pair_cleanup(pair);
663 		return 0;
664 	}
665 
666 	return 1;
667 }
668 
669 /**
670  * Decide whether to continue running thread B
671  *
672  * @relates tst_fzsync_pair
673  * @sa tst_fzsync_run_a
674  */
tst_fzsync_run_b(struct tst_fzsync_pair * pair)675 static inline int tst_fzsync_run_b(struct tst_fzsync_pair *pair)
676 {
677 	tst_fzsync_wait_b(pair);
678 	return !tst_atomic_load(&pair->exit);
679 }
680 
681 /**
682  * Marks the start of a race region in thread A
683  *
684  * @relates tst_fzsync_pair
685  *
686  * This should be placed just before performing whatever action can cause a
687  * race condition. Usually it is placed just before a syscall and
688  * tst_fzsync_end_race_a() is placed just afterwards.
689  *
690  * A corresponding call to tst_fzsync_start_race_b() should be made in thread
691  * B.
692  *
693  * @return A non-zero value if the calling thread should continue to loop. If
694  * it returns zero then tst_fzsync_exit() has been called and you must exit
695  * the thread.
696  *
697  * @sa tst_fzsync_pair_update
698  */
tst_fzsync_start_race_a(struct tst_fzsync_pair * pair)699 static inline void tst_fzsync_start_race_a(struct tst_fzsync_pair *pair)
700 {
701 	volatile int delay;
702 
703 	tst_fzsync_pair_update(pair);
704 
705 	tst_fzsync_wait_a(pair);
706 
707 	delay = pair->delay;
708 	if (pair->yield_in_wait) {
709 		while (delay < 0) {
710 			sched_yield();
711 			delay++;
712 		}
713 	} else {
714 		while (delay < 0)
715 			delay++;
716 	}
717 
718 	tst_fzsync_time(&pair->a_start);
719 }
720 
721 /**
722  * Marks the end of a race region in thread A
723  *
724  * @relates tst_fzsync_pair
725  * @sa tst_fzsync_start_race_a
726  */
tst_fzsync_end_race_a(struct tst_fzsync_pair * pair)727 static inline void tst_fzsync_end_race_a(struct tst_fzsync_pair *pair)
728 {
729 	tst_fzsync_time(&pair->a_end);
730 	tst_fzsync_pair_wait(&pair->a_cntr, &pair->b_cntr,
731 			     &pair->spins, &pair->exit, pair->yield_in_wait);
732 }
733 
734 /**
735  * Marks the start of a race region in thread B
736  *
737  * @relates tst_fzsync_pair
738  * @sa tst_fzsync_start_race_a
739  */
tst_fzsync_start_race_b(struct tst_fzsync_pair * pair)740 static inline void tst_fzsync_start_race_b(struct tst_fzsync_pair *pair)
741 {
742 	volatile int delay;
743 
744 	tst_fzsync_wait_b(pair);
745 
746 	delay = pair->delay;
747 	if (pair->yield_in_wait) {
748 		while (delay > 0) {
749 			sched_yield();
750 			delay--;
751 		}
752 	} else {
753 		while (delay > 0)
754 			delay--;
755 	}
756 
757 	tst_fzsync_time(&pair->b_start);
758 }
759 
760 /**
761  * Marks the end of a race region in thread B
762  *
763  * @relates tst_fzsync_pair
764  * @sa tst_fzsync_start_race_a
765  */
tst_fzsync_end_race_b(struct tst_fzsync_pair * pair)766 static inline void tst_fzsync_end_race_b(struct tst_fzsync_pair *pair)
767 {
768 	tst_fzsync_time(&pair->b_end);
769 	tst_fzsync_pair_wait(&pair->b_cntr, &pair->a_cntr,
770 			     &pair->spins, &pair->exit, pair->yield_in_wait);
771 }
772 
773 /**
774  * Add some amount to the delay bias
775  *
776  * @relates tst_fzsync_pair
777  * @param change The amount to add, can be negative
778  *
779  * A positive change delays thread B and a negative one delays thread
780  * A.
781  *
782  * It is intended to be used in tests where the time taken by syscall A and/or
783  * B are significantly affected by their chronological order. To the extent
784  * that the delay range will not include the correct values if too many of the
785  * initial samples are taken when the syscalls (or operations within the
786  * syscalls) happen in the wrong order.
787  *
788  * An example of this is cve/cve-2016-7117.c where a call to close() is racing
789  * with a call to recvmmsg(). If close() happens before recvmmsg() has chance
790  * to check if the file descriptor is open then recvmmsg() completes very
791  * quickly. If the call to close() happens once recvmmsg() has already checked
792  * the descriptor it takes much longer. The sample where recvmmsg() completes
793  * quickly is essentially invalid for our purposes. The test uses the simple
794  * heuristic of whether recvmmsg() returns EBADF, to decide if it should call
795  * tst_fzsync_pair_add_bias() to further delay syscall B.
796  */
tst_fzsync_pair_add_bias(struct tst_fzsync_pair * pair,int change)797 static inline void tst_fzsync_pair_add_bias(struct tst_fzsync_pair *pair, int change)
798 {
799 	if (pair->sampling > 0)
800 		pair->delay_bias += change;
801 }
802 
803 #endif /* TST_FUZZY_SYNC_H__ */
804