// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Module-based API test facility for ww_mutexes
 */

#include <linux/kernel.h>

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/ww_mutex.h>

static DEFINE_WD_CLASS(ww_class);
struct workqueue_struct *wq;

struct test_mutex {
	struct work_struct work;
	struct ww_mutex mutex;
	struct completion ready, go, done;
	unsigned int flags;
};

#define TEST_MTX_SPIN BIT(0)
#define TEST_MTX_TRY BIT(1)
#define TEST_MTX_CTX BIT(2)
#define __TEST_MTX_LAST BIT(3)

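/*
 * Worker half of the basic mutual-exclusion test: signal that we are
 * running, wait for the go-ahead, then take the mutex either by spinning
 * on ww_mutex_trylock() or by blocking in ww_mutex_lock(), and report
 * completion before dropping the lock.
 */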
static void test_mutex_work(struct work_struct *work)
{
	struct test_mutex *mtx = container_of(work, typeof(*mtx), work);

	complete(&mtx->ready);
	wait_for_completion(&mtx->go);

	if (mtx->flags & TEST_MTX_TRY) {
		while (!ww_mutex_trylock(&mtx->mutex))
			cond_resched();
	} else {
		ww_mutex_lock(&mtx->mutex, NULL);
	}
	complete(&mtx->done);
	ww_mutex_unlock(&mtx->mutex);
}

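/*
 * Basic mutual-exclusion check: the main thread takes the mutex (with or
 * without an acquire context, depending on TEST_MTX_CTX), releases the
 * worker and then verifies that the worker cannot signal "done" while the
 * lock is still held. Any completion within TIMEOUT means the worker got
 * the lock concurrently, i.e. mutual exclusion failed.
 */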
static int __test_mutex(unsigned int flags)
{
#define TIMEOUT (HZ / 16)
	struct test_mutex mtx;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mtx.mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	INIT_WORK_ONSTACK(&mtx.work, test_mutex_work);
	init_completion(&mtx.ready);
	init_completion(&mtx.go);
	init_completion(&mtx.done);
	mtx.flags = flags;

	schedule_work(&mtx.work);

	wait_for_completion(&mtx.ready);
	ww_mutex_lock(&mtx.mutex, (flags & TEST_MTX_CTX) ? &ctx : NULL);
	complete(&mtx.go);
	if (flags & TEST_MTX_SPIN) {
		unsigned long timeout = jiffies + TIMEOUT;

		ret = 0;
		do {
			if (completion_done(&mtx.done)) {
				ret = -EINVAL;
				break;
			}
			cond_resched();
		} while (time_before(jiffies, timeout));
	} else {
		ret = wait_for_completion_timeout(&mtx.done, TIMEOUT);
	}
	ww_mutex_unlock(&mtx.mutex);
	ww_acquire_fini(&ctx);

	if (ret) {
		pr_err("%s(flags=%x): mutual exclusion failure\n",
		       __func__, flags);
		ret = -EINVAL;
	}

	flush_work(&mtx.work);
	destroy_work_on_stack(&mtx.work);
	return ret;
#undef TIMEOUT
}

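/* Run the basic test once for every combination of the TEST_MTX_* flags. */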
static int test_mutex(void)
{
	int ret;
	int i;

	for (i = 0; i < __TEST_MTX_LAST; i++) {
		ret = __test_mutex(i);
		if (ret)
			return ret;
	}

	return 0;
}

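/*
 * AA (self-recursion) test: with the mutex already held in a context,
 * ww_mutex_trylock() must fail and a second ww_mutex_lock() with the same
 * acquire context must return -EALREADY rather than deadlock.
 */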
static int test_aa(void)
{
	struct ww_mutex mutex;
	struct ww_acquire_ctx ctx;
	int ret;

	ww_mutex_init(&mutex, &ww_class);
	ww_acquire_init(&ctx, &ww_class);

	ww_mutex_lock(&mutex, &ctx);

	if (ww_mutex_trylock(&mutex)) {
		pr_err("%s: trylocked itself!\n", __func__);
		ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = ww_mutex_lock(&mutex, &ctx);
	if (ret != -EALREADY) {
		pr_err("%s: missed deadlock for recursing, ret=%d\n",
		       __func__, ret);
		if (!ret)
			ww_mutex_unlock(&mutex);
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
out:
	ww_mutex_unlock(&mutex);
	ww_acquire_fini(&ctx);
	return ret;
}

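/*
 * ABBA test: the main thread locks A then B while a worker locks B then A
 * under its own acquire context, guaranteeing a lock-order inversion.
 */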
struct test_abba {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex b_mutex;
	struct completion a_ready;
	struct completion b_ready;
	bool resolve;
	int result;
};

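/*
 * Worker side of the ABBA test: take B, wait until the other thread holds
 * A, then try to take A. If "resolve" is set and -EDEADLK comes back,
 * back off by releasing B, acquiring A via the slow path and retaking B.
 */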
static void test_abba_work(struct work_struct *work)
{
	struct test_abba *abba = container_of(work, typeof(*abba), work);
	struct ww_acquire_ctx ctx;
	int err;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba->b_mutex, &ctx);

	complete(&abba->b_ready);
	wait_for_completion(&abba->a_ready);

	err = ww_mutex_lock(&abba->a_mutex, &ctx);
	if (abba->resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba->b_mutex);
		ww_mutex_lock_slow(&abba->a_mutex, &ctx);
		err = ww_mutex_lock(&abba->b_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba->a_mutex);
	ww_mutex_unlock(&abba->b_mutex);
	ww_acquire_fini(&ctx);

	abba->result = err;
}

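/*
 * Drive the ABBA scenario from the other side (lock A, then B). With
 * resolve=true both threads must eventually succeed after backing off;
 * with resolve=false at least one of them must see -EDEADLK.
 */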
static int test_abba(bool resolve)
{
	struct test_abba abba;
	struct ww_acquire_ctx ctx;
	int err, ret;

	ww_mutex_init(&abba.a_mutex, &ww_class);
	ww_mutex_init(&abba.b_mutex, &ww_class);
	INIT_WORK_ONSTACK(&abba.work, test_abba_work);
	init_completion(&abba.a_ready);
	init_completion(&abba.b_ready);
	abba.resolve = resolve;

	schedule_work(&abba.work);

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&abba.a_mutex, &ctx);

	complete(&abba.a_ready);
	wait_for_completion(&abba.b_ready);

	err = ww_mutex_lock(&abba.b_mutex, &ctx);
	if (resolve && err == -EDEADLK) {
		ww_mutex_unlock(&abba.a_mutex);
		ww_mutex_lock_slow(&abba.b_mutex, &ctx);
		err = ww_mutex_lock(&abba.a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(&abba.b_mutex);
	ww_mutex_unlock(&abba.a_mutex);
	ww_acquire_fini(&ctx);

	flush_work(&abba.work);
	destroy_work_on_stack(&abba.work);

	ret = 0;
	if (resolve) {
		if (err || abba.result) {
			pr_err("%s: failed to resolve ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	} else {
		if (err != -EDEADLK && abba.result != -EDEADLK) {
			pr_err("%s: missed ABBA deadlock, A err=%d, B err=%d\n",
			       __func__, err, abba.result);
			ret = -EINVAL;
		}
	}
	return ret;
}

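/*
 * Cyclic deadlock test: each of N workers holds its own a_mutex and then
 * tries to take the next worker's mutex, closing the ring. Every worker
 * is expected to resolve the resulting -EDEADLK by backing off.
 */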
struct test_cycle {
	struct work_struct work;
	struct ww_mutex a_mutex;
	struct ww_mutex *b_mutex;
	struct completion *a_signal;
	struct completion b_signal;
	int result;
};

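/*
 * One link in the ring: lock our own mutex, signal the predecessor that it
 * is now held, wait until the next worker holds theirs, then try to take
 * the next mutex in the ring, backing off via the slow path on -EDEADLK.
 */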
static void test_cycle_work(struct work_struct *work)
{
	struct test_cycle *cycle = container_of(work, typeof(*cycle), work);
	struct ww_acquire_ctx ctx;
	int err, erra = 0;

	ww_acquire_init(&ctx, &ww_class);
	ww_mutex_lock(&cycle->a_mutex, &ctx);

	complete(cycle->a_signal);
	wait_for_completion(&cycle->b_signal);

	err = ww_mutex_lock(cycle->b_mutex, &ctx);
	if (err == -EDEADLK) {
		err = 0;
		ww_mutex_unlock(&cycle->a_mutex);
		ww_mutex_lock_slow(cycle->b_mutex, &ctx);
		erra = ww_mutex_lock(&cycle->a_mutex, &ctx);
	}

	if (!err)
		ww_mutex_unlock(cycle->b_mutex);
	if (!erra)
		ww_mutex_unlock(&cycle->a_mutex);
	ww_acquire_fini(&ctx);

	cycle->result = err ?: erra;
}

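/*
 * Build a ring of nthreads workers, wire up each worker's "next" mutex and
 * signalling completion, run them all on the unbound workqueue and check
 * that every worker resolved the cycle (result == 0).
 */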
static int __test_cycle(unsigned int nthreads)
{
	struct test_cycle *cycles;
	unsigned int n, last = nthreads - 1;
	int ret;

	cycles = kmalloc_array(nthreads, sizeof(*cycles), GFP_KERNEL);
	if (!cycles)
		return -ENOMEM;

	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		ww_mutex_init(&cycle->a_mutex, &ww_class);
		if (n == last)
			cycle->b_mutex = &cycles[0].a_mutex;
		else
			cycle->b_mutex = &cycles[n + 1].a_mutex;

		if (n == 0)
			cycle->a_signal = &cycles[last].b_signal;
		else
			cycle->a_signal = &cycles[n - 1].b_signal;
		init_completion(&cycle->b_signal);

		INIT_WORK(&cycle->work, test_cycle_work);
		cycle->result = 0;
	}

	for (n = 0; n < nthreads; n++)
		queue_work(wq, &cycles[n].work);

	flush_workqueue(wq);

	ret = 0;
	for (n = 0; n < nthreads; n++) {
		struct test_cycle *cycle = &cycles[n];

		if (!cycle->result)
			continue;

		pr_err("cyclic deadlock not resolved, ret[%d/%d] = %d\n",
		       n, nthreads, cycle->result);
		ret = -EINVAL;
		break;
	}

	for (n = 0; n < nthreads; n++)
		ww_mutex_destroy(&cycles[n].a_mutex);
	kfree(cycles);
	return ret;
}

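/* Exercise ring sizes from 2 up to ncpus + 1. */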
static int test_cycle(unsigned int ncpus)
{
	unsigned int n;
	int ret;

	for (n = 2; n <= ncpus + 1; n++) {
		ret = __test_cycle(n);
		if (ret)
			return ret;
	}

	return 0;
}

struct stress {
	struct work_struct work;
	struct ww_mutex *locks;
	unsigned long timeout;
	int nlocks;
};

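/*
 * Return a randomly shuffled array of the indices 0..count-1
 * (a Fisher-Yates style shuffle), or NULL on allocation failure.
 */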
static int *get_random_order(int count)
{
	int *order;
	int n, r, tmp;

	order = kmalloc_array(count, sizeof(*order), GFP_KERNEL);
	if (!order)
		return order;

	for (n = 0; n < count; n++)
		order[n] = n;

	for (n = count - 1; n > 1; n--) {
		r = get_random_int() % (n + 1);
		if (r != n) {
			tmp = order[n];
			order[n] = order[r];
			order[r] = tmp;
		}
	}

	return order;
}

static void dummy_load(struct stress *stress)
{
	usleep_range(1000, 2000);
}

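/*
 * Stress pattern 1: repeatedly acquire all locks in a fixed (but randomly
 * chosen) order under one acquire context. On -EDEADLK drop everything,
 * take the contended lock via ww_mutex_lock_slow() and retry, skipping the
 * already-held contended lock on the next pass.
 */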
static void stress_inorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *locks = stress->locks;
	struct ww_acquire_ctx ctx;
	int *order;

	order = get_random_order(nlocks);
	if (!order)
		return;

	do {
		int contended = -1;
		int n, err;

		ww_acquire_init(&ctx, &ww_class);
retry:
		err = 0;
		for (n = 0; n < nlocks; n++) {
			if (n == contended)
				continue;

			err = ww_mutex_lock(&locks[order[n]], &ctx);
			if (err < 0)
				break;
		}
		if (!err)
			dummy_load(stress);

		if (contended > n)
			ww_mutex_unlock(&locks[order[contended]]);
		contended = n;
		while (n--)
			ww_mutex_unlock(&locks[order[n]]);

		if (err == -EDEADLK) {
			ww_mutex_lock_slow(&locks[order[contended]], &ctx);
			goto retry;
		}

		if (err) {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

	kfree(order);
}

struct reorder_lock {
	struct list_head link;
	struct ww_mutex *lock;
};

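/*
 * Stress pattern 2: like the in-order stress, but keep the locks on a list
 * and move the contended lock to the head after a back-off, so each retry
 * acquires the locks in a different order.
 */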
static void stress_reorder_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	LIST_HEAD(locks);
	struct ww_acquire_ctx ctx;
	struct reorder_lock *ll, *ln;
	int *order;
	int n, err;

	order = get_random_order(stress->nlocks);
	if (!order)
		return;

	for (n = 0; n < stress->nlocks; n++) {
		ll = kmalloc(sizeof(*ll), GFP_KERNEL);
		if (!ll)
			goto out;

		ll->lock = &stress->locks[order[n]];
		list_add(&ll->link, &locks);
	}
	kfree(order);
	order = NULL;

	do {
		ww_acquire_init(&ctx, &ww_class);

		list_for_each_entry(ll, &locks, link) {
			err = ww_mutex_lock(ll->lock, &ctx);
			if (!err)
				continue;

			ln = ll;
			list_for_each_entry_continue_reverse(ln, &locks, link)
				ww_mutex_unlock(ln->lock);

			if (err != -EDEADLK) {
				pr_err_once("stress (%s) failed with %d\n",
					    __func__, err);
				break;
			}

			ww_mutex_lock_slow(ll->lock, &ctx);
			list_move(&ll->link, &locks); /* restarts iteration */
		}

		dummy_load(stress);
		list_for_each_entry(ll, &locks, link)
			ww_mutex_unlock(ll->lock);

		ww_acquire_fini(&ctx);
	} while (!time_after(jiffies, stress->timeout));

out:
	list_for_each_entry_safe(ll, ln, &locks, link)
		kfree(ll);
	kfree(order);
}

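/*
 * Stress pattern 3: hammer a single randomly chosen lock without an
 * acquire context, interleaving a short dummy load while it is held.
 */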
static void stress_one_work(struct work_struct *work)
{
	struct stress *stress = container_of(work, typeof(*stress), work);
	const int nlocks = stress->nlocks;
	struct ww_mutex *lock = stress->locks + (get_random_int() % nlocks);
	int err;

	do {
		err = ww_mutex_lock(lock, NULL);
		if (!err) {
			dummy_load(stress);
			ww_mutex_unlock(lock);
		} else {
			pr_err_once("stress (%s) failed with %d\n",
				    __func__, err);
			break;
		}
	} while (!time_after(jiffies, stress->timeout));
}

#define STRESS_INORDER BIT(0)
#define STRESS_REORDER BIT(1)
#define STRESS_ONE BIT(2)
#define STRESS_ALL (STRESS_INORDER | STRESS_REORDER | STRESS_ONE)

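/*
 * Spawn nthreads workers on the test workqueue, cycling through the stress
 * patterns selected in flags, each running against the same array of
 * nlocks ww_mutexes for roughly two seconds.
 */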
static int stress(int nlocks, int nthreads, unsigned int flags)
{
	struct ww_mutex *locks;
	struct stress *stress_array;
	int n, count;

	locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
	if (!locks)
		return -ENOMEM;

	stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
				     GFP_KERNEL);
	if (!stress_array) {
		kfree(locks);
		return -ENOMEM;
	}

	for (n = 0; n < nlocks; n++)
		ww_mutex_init(&locks[n], &ww_class);

	count = 0;
	for (n = 0; nthreads; n++) {
		struct stress *stress;
		void (*fn)(struct work_struct *work);

		fn = NULL;
		switch (n & 3) {
		case 0:
			if (flags & STRESS_INORDER)
				fn = stress_inorder_work;
			break;
		case 1:
			if (flags & STRESS_REORDER)
				fn = stress_reorder_work;
			break;
		case 2:
			if (flags & STRESS_ONE)
				fn = stress_one_work;
			break;
		}

		if (!fn)
			continue;

		stress = &stress_array[count++];

		INIT_WORK(&stress->work, fn);
		stress->locks = locks;
		stress->nlocks = nlocks;
		stress->timeout = jiffies + 2*HZ;

		queue_work(wq, &stress->work);
		nthreads--;
	}

	flush_workqueue(wq);

	for (n = 0; n < nlocks; n++)
		ww_mutex_destroy(&locks[n]);
	kfree(stress_array);
	kfree(locks);

	return 0;
}

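/*
 * Module init: run the functional tests (mutual exclusion, AA, ABBA,
 * cyclic deadlocks) followed by the stress tests scaled to the number of
 * online CPUs. A non-zero return fails module load, flagging the failure.
 */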
static int __init test_ww_mutex_init(void)
{
	int ncpus = num_online_cpus();
	int ret;

	wq = alloc_workqueue("test-ww_mutex", WQ_UNBOUND, 0);
	if (!wq)
		return -ENOMEM;

	ret = test_mutex();
	if (ret)
		return ret;

	ret = test_aa();
	if (ret)
		return ret;

	ret = test_abba(false);
	if (ret)
		return ret;

	ret = test_abba(true);
	if (ret)
		return ret;

	ret = test_cycle(ncpus);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_INORDER);
	if (ret)
		return ret;

	ret = stress(16, 2*ncpus, STRESS_REORDER);
	if (ret)
		return ret;

	ret = stress(4095, hweight32(STRESS_ALL)*ncpus, STRESS_ALL);
	if (ret)
		return ret;

	return 0;
}

static void __exit test_ww_mutex_exit(void)
{
	destroy_workqueue(wq);
}

module_init(test_ww_mutex_init);
module_exit(test_ww_mutex_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Intel Corporation");