// SPDX-License-Identifier: GPL-2.0+
/*
 * Module-based torture test facility for locking
 *
 * Copyright (C) IBM Corporation, 2014
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *          Davidlohr Bueso <dave@stgolabs.net>
 *	Based on kernel/rcu/torture.c.
 */

#define pr_fmt(fmt) fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched/rt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/rtmutex.h>
#include <linux/atomic.h>
#include <linux/moduleparam.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/torture.h>
#include <linux/reboot.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

torture_param(int, nwriters_stress, -1,
	      "Number of write-locking stress-test threads");
torture_param(int, nreaders_stress, -1,
	      "Number of read-locking stress-test threads");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0,
	      "Time between CPU hotplugs (s), 0=disable");
torture_param(int, shuffle_interval, 3,
	      "Number of jiffies between shuffles, 0=disable");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");

static char *torture_type = "spin_lock";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type,
		 "Type of lock to torture (spin_lock, spin_lock_irq, mutex_lock, ...)");

static struct task_struct *stats_task;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;

static bool lock_is_write_held;
static atomic_t lock_is_read_held;
static unsigned long last_lock_release;

struct lock_stress_stats {
	long n_lock_fail;
	long n_lock_acquired;
};

/* Forward reference. */
static void lock_torture_cleanup(void);

/*
 * Operations vector for selecting different types of tests.
 */
struct lock_torture_ops {
	void (*init)(void);
	void (*exit)(void);
	int (*writelock)(int tid);
	void (*write_delay)(struct torture_random_state *trsp);
	void (*task_boost)(struct torture_random_state *trsp);
	void (*writeunlock)(int tid);
	int (*readlock)(int tid);
	void (*read_delay)(struct torture_random_state *trsp);
	void (*readunlock)(int tid);

	unsigned long flags; /* for irq spinlocks */
	const char *name;
};
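/*
 * Each writer kthread invokes task_boost(), writelock(), write_delay(),
 * and writeunlock() in that order on every pass, and each reader kthread
 * does likewise with readlock(), read_delay(), and readunlock().  A NULL
 * ->readlock marks an exclusive-only primitive, for which no reader
 * kthreads are created.
 */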

struct lock_torture_cxt {
	int nrealwriters_stress;
	int nrealreaders_stress;
	bool debug_lock;
	bool init_called;
	atomic_t n_lock_torture_errors;
	struct lock_torture_ops *cur_ops;
	struct lock_stress_stats *lwsa; /* writer statistics */
	struct lock_stress_stats *lrsa; /* reader statistics */
};
static struct lock_torture_cxt cxt = { 0, 0, false, false,
				       ATOMIC_INIT(0),
				       NULL, NULL};
/*
 * Definitions for lock torture testing.
 */

static int torture_lock_busted_write_lock(int tid __maybe_unused)
{
	return 0;  /* BUGGY, do not use in real life!!! */
}

static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_lock_busted_write_unlock(int tid __maybe_unused)
{
	/* BUGGY, do not use in real life!!! */
}

static void torture_boost_dummy(struct torture_random_state *trsp)
{
	/* Only rtmutexes care about priority */
}

static struct lock_torture_ops lock_busted_ops = {
	.writelock = torture_lock_busted_write_lock,
	.write_delay = torture_lock_busted_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_busted_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "lock_busted"
};
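/*
 * Because lock_busted_ops takes no lock at all, its "critical sections"
 * overlap freely, which should trip the lock_is_write_held checks in
 * lock_torture_writer().  This serves as a sanity check that the test
 * really can detect a buggy locking primitive.
 */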

static DEFINE_SPINLOCK(torture_spinlock);

static int torture_spin_lock_write_lock(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	spin_lock(&torture_spinlock);
	return 0;
}

static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}
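/*
 * Note that the delay heuristic above scales with the number of writers
 * so that the aggregate rate of long delays stays roughly constant: each
 * call rolls 1-in-(nrealwriters_stress * 2000 * longdelay_ms), so with,
 * for example, 4 writers and longdelay_ms = 100, a given writer takes the
 * 100-millisecond delay about once per 800,000 acquisitions.
 */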

static void torture_spin_lock_write_unlock(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock(&torture_spinlock);
}

static struct lock_torture_ops spin_lock_ops = {
	.writelock = torture_spin_lock_write_lock,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_spin_lock_write_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock"
};

static int torture_spin_lock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_spinlock)
{
	unsigned long flags;

	spin_lock_irqsave(&torture_spinlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_lock_spin_write_unlock_irq(int tid __maybe_unused)
__releases(torture_spinlock)
{
	spin_unlock_irqrestore(&torture_spinlock, cxt.cur_ops->flags);
}
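/*
 * Stashing the irqsave flags in the shared cxt.cur_ops->flags field is
 * safe here only because the store happens after torture_spinlock has
 * been acquired and the load happens before it is released, so all
 * accesses to the field are serialized by the lock itself.
 */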

static struct lock_torture_ops spin_lock_irq_ops = {
	.writelock = torture_spin_lock_write_lock_irq,
	.write_delay = torture_spin_lock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_lock_spin_write_unlock_irq,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "spin_lock_irq"
};

static DEFINE_RWLOCK(torture_rwlock);

static int torture_rwlock_write_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	write_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_write_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_write_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock(&torture_rwlock);
}

static int torture_rwlock_read_lock(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	read_lock(&torture_rwlock);
	return 0;
}

static void torture_rwlock_read_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 10;
	const unsigned long longdelay_ms = 100;

	/* We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	else
		udelay(shortdelay_us);
}

static void torture_rwlock_read_unlock(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock(&torture_rwlock);
}

static struct lock_torture_ops rw_lock_ops = {
	.writelock = torture_rwlock_write_lock,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock,
	.readlock = torture_rwlock_read_lock,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock,
	.name = "rw_lock"
};

static int torture_rwlock_write_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	write_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_write_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static int torture_rwlock_read_lock_irq(int tid __maybe_unused)
__acquires(torture_rwlock)
{
	unsigned long flags;

	read_lock_irqsave(&torture_rwlock, flags);
	cxt.cur_ops->flags = flags;
	return 0;
}

static void torture_rwlock_read_unlock_irq(int tid __maybe_unused)
__releases(torture_rwlock)
{
	read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
}

static struct lock_torture_ops rw_lock_irq_ops = {
	.writelock = torture_rwlock_write_lock_irq,
	.write_delay = torture_rwlock_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwlock_write_unlock_irq,
	.readlock = torture_rwlock_read_lock_irq,
	.read_delay = torture_rwlock_read_delay,
	.readunlock = torture_rwlock_read_unlock_irq,
	.name = "rw_lock_irq"
};

static DEFINE_MUTEX(torture_mutex);

static int torture_mutex_lock(int tid __maybe_unused)
__acquires(torture_mutex)
{
	mutex_lock(&torture_mutex);
	return 0;
}

static void torture_mutex_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 5);
	else
		mdelay(longdelay_ms / 5);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_mutex_unlock(int tid __maybe_unused)
__releases(torture_mutex)
{
	mutex_unlock(&torture_mutex);
}

static struct lock_torture_ops mutex_lock_ops = {
	.writelock = torture_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "mutex_lock"
};

#include <linux/ww_mutex.h>
/*
 * The torture ww_mutexes should belong to the same lock class as
 * torture_ww_class to avoid a lockdep problem.  The ww_mutex_init()
 * function is therefore used for initialization to ensure this.
 */
static DEFINE_WD_CLASS(torture_ww_class);
static struct ww_mutex torture_ww_mutex_0, torture_ww_mutex_1, torture_ww_mutex_2;
static struct ww_acquire_ctx *ww_acquire_ctxs;

static void torture_ww_mutex_init(void)
{
	ww_mutex_init(&torture_ww_mutex_0, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_1, &torture_ww_class);
	ww_mutex_init(&torture_ww_mutex_2, &torture_ww_class);

	ww_acquire_ctxs = kmalloc_array(cxt.nrealwriters_stress,
					sizeof(*ww_acquire_ctxs),
					GFP_KERNEL);
	if (!ww_acquire_ctxs)
		VERBOSE_TOROUT_STRING("ww_acquire_ctx: Out of memory");
}

static void torture_ww_mutex_exit(void)
{
	kfree(ww_acquire_ctxs);
}

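/*
 * Acquire all three ww_mutexes using the wait/wound protocol: upon
 * -EDEADLK, drop every mutex already held, take the contended one with
 * ww_mutex_lock_slow(), move it to the front of the list, and then retry
 * the remainder in the new order.
 */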
static int torture_ww_mutex_lock(int tid)
__acquires(torture_ww_mutex_0)
__acquires(torture_ww_mutex_1)
__acquires(torture_ww_mutex_2)
{
	LIST_HEAD(list);
	struct reorder_lock {
		struct list_head link;
		struct ww_mutex *lock;
	} locks[3], *ll, *ln;
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	locks[0].lock = &torture_ww_mutex_0;
	list_add(&locks[0].link, &list);

	locks[1].lock = &torture_ww_mutex_1;
	list_add(&locks[1].link, &list);

	locks[2].lock = &torture_ww_mutex_2;
	list_add(&locks[2].link, &list);

	ww_acquire_init(ctx, &torture_ww_class);

	list_for_each_entry(ll, &list, link) {
		int err;

		err = ww_mutex_lock(ll->lock, ctx);
		if (!err)
			continue;

		ln = ll;
		list_for_each_entry_continue_reverse(ln, &list, link)
			ww_mutex_unlock(ln->lock);

		if (err != -EDEADLK)
			return err;

		ww_mutex_lock_slow(ll->lock, ctx);
		list_move(&ll->link, &list);
	}

	return 0;
}

static void torture_ww_mutex_unlock(int tid)
__releases(torture_ww_mutex_0)
__releases(torture_ww_mutex_1)
__releases(torture_ww_mutex_2)
{
	struct ww_acquire_ctx *ctx = &ww_acquire_ctxs[tid];

	ww_mutex_unlock(&torture_ww_mutex_0);
	ww_mutex_unlock(&torture_ww_mutex_1);
	ww_mutex_unlock(&torture_ww_mutex_2);
	ww_acquire_fini(ctx);
}

static struct lock_torture_ops ww_mutex_lock_ops = {
	.init = torture_ww_mutex_init,
	.exit = torture_ww_mutex_exit,
	.writelock = torture_ww_mutex_lock,
	.write_delay = torture_mutex_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_ww_mutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "ww_mutex_lock"
};

#ifdef CONFIG_RT_MUTEXES
static DEFINE_RT_MUTEX(torture_rtmutex);

static int torture_rtmutex_lock(int tid __maybe_unused)
__acquires(torture_rtmutex)
{
	rt_mutex_lock(&torture_rtmutex);
	return 0;
}

static void torture_rtmutex_boost(struct torture_random_state *trsp)
{
	const unsigned int factor = 50000; /* yes, quite arbitrary */

	if (!rt_task(current)) {
		/*
		 * Boost priority once every ~50k operations.  When the
		 * task tries to take the lock, the rtmutex code will
		 * account for the new priority, and do any corresponding
		 * pi-dance.
		 */
		if (trsp && !(torture_random(trsp) %
			      (cxt.nrealwriters_stress * factor))) {
			sched_set_fifo(current);
		} else /* common case, do nothing */
			return;
	} else {
		/*
		 * The task will remain boosted for another ~500k operations,
		 * then be restored to its original priority, and so forth.
		 *
		 * When @trsp is NULL, we want to force-reset the task in
		 * order to stop the kthread.
		 */
		if (!trsp || !(torture_random(trsp) %
			       (cxt.nrealwriters_stress * factor * 2))) {
			sched_set_normal(current, 0);
		} else /* common case, do nothing */
			return;
	}
}
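/*
 * Note that both probabilities above scale with the writer count, which
 * keeps the systemwide boost/deboost rate roughly constant no matter how
 * many writer kthreads are running.
 */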

static void torture_rtmutex_delay(struct torture_random_state *trsp)
{
	const unsigned long shortdelay_us = 2;
	const unsigned long longdelay_ms = 100;

	/*
	 * We want a short delay mostly to emulate likely code, and
	 * we want a long delay occasionally to force massive contention.
	 */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms);
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2 * shortdelay_us)))
		udelay(shortdelay_us);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rtmutex_unlock(int tid __maybe_unused)
__releases(torture_rtmutex)
{
	rt_mutex_unlock(&torture_rtmutex);
}

static struct lock_torture_ops rtmutex_lock_ops = {
	.writelock = torture_rtmutex_lock,
	.write_delay = torture_rtmutex_delay,
	.task_boost = torture_rtmutex_boost,
	.writeunlock = torture_rtmutex_unlock,
	.readlock = NULL,
	.read_delay = NULL,
	.readunlock = NULL,
	.name = "rtmutex_lock"
};
#endif

static DECLARE_RWSEM(torture_rwsem);
static int torture_rwsem_down_write(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_write(&torture_rwsem);
	return 0;
}

static void torture_rwsem_write_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 10);
	else
		mdelay(longdelay_ms / 10);
	if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_write(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_write(&torture_rwsem);
}

static int torture_rwsem_down_read(int tid __maybe_unused)
__acquires(torture_rwsem)
{
	down_read(&torture_rwsem);
	return 0;
}

static void torture_rwsem_read_delay(struct torture_random_state *trsp)
{
	const unsigned long longdelay_ms = 100;

	/* We want a long delay occasionally to force massive contention.  */
	if (!(torture_random(trsp) %
	      (cxt.nrealreaders_stress * 2000 * longdelay_ms)))
		mdelay(longdelay_ms * 2);
	else
		mdelay(longdelay_ms / 2);
	if (!(torture_random(trsp) % (cxt.nrealreaders_stress * 20000)))
		torture_preempt_schedule();  /* Allow test to be preempted. */
}

static void torture_rwsem_up_read(int tid __maybe_unused)
__releases(torture_rwsem)
{
	up_read(&torture_rwsem);
}

static struct lock_torture_ops rwsem_lock_ops = {
	.writelock = torture_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_rwsem_up_write,
	.readlock = torture_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_rwsem_up_read,
	.name = "rwsem_lock"
};

#include <linux/percpu-rwsem.h>
static struct percpu_rw_semaphore pcpu_rwsem;

static void torture_percpu_rwsem_init(void)
{
	BUG_ON(percpu_init_rwsem(&pcpu_rwsem));
}
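/*
 * Note that percpu_init_rwsem() fails only if its per-CPU allocations
 * fail, so a BUG_ON() at module-init time is a blunt but acceptable
 * response here.
 */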

static void torture_percpu_rwsem_exit(void)
{
	percpu_free_rwsem(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_write(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_write(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_write(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_write(&pcpu_rwsem);
}

static int torture_percpu_rwsem_down_read(int tid __maybe_unused)
__acquires(pcpu_rwsem)
{
	percpu_down_read(&pcpu_rwsem);
	return 0;
}

static void torture_percpu_rwsem_up_read(int tid __maybe_unused)
__releases(pcpu_rwsem)
{
	percpu_up_read(&pcpu_rwsem);
}

static struct lock_torture_ops percpu_rwsem_lock_ops = {
	.init = torture_percpu_rwsem_init,
	.exit = torture_percpu_rwsem_exit,
	.writelock = torture_percpu_rwsem_down_write,
	.write_delay = torture_rwsem_write_delay,
	.task_boost = torture_boost_dummy,
	.writeunlock = torture_percpu_rwsem_up_write,
	.readlock = torture_percpu_rwsem_down_read,
	.read_delay = torture_rwsem_read_delay,
	.readunlock = torture_percpu_rwsem_up_read,
	.name = "percpu_rwsem_lock"
};

/*
 * Lock torture writer kthread.  Repeatedly acquires and releases
 * the lock, checking for duplicate acquisitions.
 */
static int lock_torture_writer(void *arg)
{
	struct lock_stress_stats *lwsp = arg;
	int tid = lwsp - cxt.lwsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->task_boost(&rand);
		cxt.cur_ops->writelock(tid);
		if (WARN_ON_ONCE(lock_is_write_held))
			lwsp->n_lock_fail++;
		lock_is_write_held = true;
		if (WARN_ON_ONCE(atomic_read(&lock_is_read_held)))
			lwsp->n_lock_fail++; /* rare, but... */

		lwsp->n_lock_acquired++;
		cxt.cur_ops->write_delay(&rand);
		lock_is_write_held = false;
		WRITE_ONCE(last_lock_release, jiffies);
		cxt.cur_ops->writeunlock(tid);

		stutter_wait("lock_torture_writer");
	} while (!torture_must_stop());

	cxt.cur_ops->task_boost(NULL); /* reset prio */
	torture_kthread_stopping("lock_torture_writer");
	return 0;
}
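/*
 * Note that lock_is_write_held is a plain bool that is set only while the
 * write lock is held, so a writer that observes it already set upon entry
 * has found a genuine mutual-exclusion failure rather than a mere data
 * race on the flag itself.
 */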

/*
 * Lock torture reader kthread.  Repeatedly acquires and releases
 * the reader lock.
 */
static int lock_torture_reader(void *arg)
{
	struct lock_stress_stats *lrsp = arg;
	int tid = lrsp - cxt.lrsa;
	DEFINE_TORTURE_RANDOM(rand);

	VERBOSE_TOROUT_STRING("lock_torture_reader task started");
	set_user_nice(current, MAX_NICE);

	do {
		if ((torture_random(&rand) & 0xfffff) == 0)
			schedule_timeout_uninterruptible(1);

		cxt.cur_ops->readlock(tid);
		atomic_inc(&lock_is_read_held);
		if (WARN_ON_ONCE(lock_is_write_held))
			lrsp->n_lock_fail++; /* rare, but... */

		lrsp->n_lock_acquired++;
		cxt.cur_ops->read_delay(&rand);
		atomic_dec(&lock_is_read_held);
		cxt.cur_ops->readunlock(tid);

		stutter_wait("lock_torture_reader");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_reader");
	return 0;
}

/*
 * Create a lock-torture-statistics message in the specified buffer.
 */
static void __torture_print_stats(char *page,
				  struct lock_stress_stats *statp, bool write)
{
	long cur;
	bool fail = false;
	int i, n_stress;
	long max = 0, min = statp ? data_race(statp[0].n_lock_acquired) : 0;
	long long sum = 0;

	n_stress = write ? cxt.nrealwriters_stress : cxt.nrealreaders_stress;
	for (i = 0; i < n_stress; i++) {
		if (data_race(statp[i].n_lock_fail))
			fail = true;
		cur = data_race(statp[i].n_lock_acquired);
		sum += cur;
		if (max < cur)
			max = cur;
		if (min > cur)
			min = cur;
	}
	page += sprintf(page,
			"%s:  Total: %lld  Max/Min: %ld/%ld %s  Fail: %d %s\n",
			write ? "Writes" : "Reads ",
			sum, max, min,
			!onoff_interval && max / 2 > min ? "???" : "",
			fail, fail ? "!!!" : "");
	if (fail)
		atomic_inc(&cxt.n_lock_torture_errors);
}
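/*
 * The "???" marker above is a starvation heuristic: in the absence of CPU
 * hotplug (which legitimately skews per-thread counts), the busiest thread
 * acquiring more than twice as many locks as the least busy one suggests
 * unfair lock handoff.
 */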

/*
 * Print torture statistics.  Caller must ensure that there is only one
 * call to this function at a given time!!!  This is normally accomplished
 * by relying on the module system to only have one copy of the module
 * loaded, and then by giving the lock_torture_stats kthread full control
 * (or the init/cleanup functions when lock_torture_stats thread is not
 * running).
 */
static void lock_torture_stats_print(void)
{
	int size = cxt.nrealwriters_stress * 200 + 8192;
	char *buf;

	if (cxt.cur_ops->readlock)
		size += cxt.nrealreaders_stress * 200 + 8192;

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		pr_err("lock_torture_stats_print: Out of memory, need: %d",
		       size);
		return;
	}

	__torture_print_stats(buf, cxt.lwsa, true);
	pr_alert("%s", buf);
	kfree(buf);

	if (cxt.cur_ops->readlock) {
		buf = kmalloc(size, GFP_KERNEL);
		if (!buf) {
			pr_err("lock_torture_stats_print: Out of memory, need: %d",
			       size);
			return;
		}

		__torture_print_stats(buf, cxt.lrsa, false);
		pr_alert("%s", buf);
		kfree(buf);
	}
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int lock_torture_stats(void *arg)
{
	VERBOSE_TOROUT_STRING("lock_torture_stats task started");
	do {
		schedule_timeout_interruptible(stat_interval * HZ);
		lock_torture_stats_print();
		torture_shutdown_absorb("lock_torture_stats");
	} while (!torture_must_stop());
	torture_kthread_stopping("lock_torture_stats");
	return 0;
}

static inline void
lock_torture_print_module_parms(struct lock_torture_ops *cur_ops,
				const char *tag)
{
	pr_alert("%s" TORTURE_FLAG
		 "--- %s%s: nwriters_stress=%d nreaders_stress=%d stat_interval=%d verbose=%d shuffle_interval=%d stutter=%d shutdown_secs=%d onoff_interval=%d onoff_holdoff=%d\n",
		 torture_type, tag, cxt.debug_lock ? " [debug]": "",
		 cxt.nrealwriters_stress, cxt.nrealreaders_stress, stat_interval,
		 verbose, shuffle_interval, stutter, shutdown_secs,
		 onoff_interval, onoff_holdoff);
}

static void lock_torture_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	/*
	 * If cxt.lwsa and cxt.lrsa are both NULL, this is an early cleanup,
	 * meaning that the test has not run, such as when bogus arguments
	 * were passed while loading the module.  However, cxt.cur_ops->init()
	 * may have been invoked, so besides performing the underlying
	 * torture-specific cleanups, cxt.cur_ops->exit() will be invoked
	 * below if needed.
	 */
	if (!cxt.lwsa && !cxt.lrsa)
		goto end;

	if (writer_tasks) {
		for (i = 0; i < cxt.nrealwriters_stress; i++)
			torture_stop_kthread(lock_torture_writer,
					     writer_tasks[i]);
		kfree(writer_tasks);
		writer_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < cxt.nrealreaders_stress; i++)
			torture_stop_kthread(lock_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}

	torture_stop_kthread(lock_torture_stats, stats_task);
	lock_torture_stats_print();  /* -After- the stats thread is stopped! */

	if (atomic_read(&cxt.n_lock_torture_errors))
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: FAILURE");
	else if (torture_onoff_failures())
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: LOCK_HOTPLUG");
	else
		lock_torture_print_module_parms(cxt.cur_ops,
						"End of test: SUCCESS");

	kfree(cxt.lwsa);
	cxt.lwsa = NULL;
	kfree(cxt.lrsa);
	cxt.lrsa = NULL;

end:
	if (cxt.init_called) {
		if (cxt.cur_ops->exit)
			cxt.cur_ops->exit();
		cxt.init_called = false;
	}
	torture_cleanup_end();
}

static int __init lock_torture_init(void)
{
	int i, j;
	int firsterr = 0;
	static struct lock_torture_ops *torture_ops[] = {
		&lock_busted_ops,
		&spin_lock_ops, &spin_lock_irq_ops,
		&rw_lock_ops, &rw_lock_irq_ops,
		&mutex_lock_ops,
		&ww_mutex_lock_ops,
#ifdef CONFIG_RT_MUTEXES
		&rtmutex_lock_ops,
#endif
		&rwsem_lock_ops,
		&percpu_rwsem_lock_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cxt.cur_ops = torture_ops[i];
		if (strcmp(torture_type, cxt.cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("lock-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("lock-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_alert(" %s", torture_ops[i]->name);
		pr_alert("\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress == 0 &&
	    (!cxt.cur_ops->readlock || nreaders_stress == 0)) {
		pr_alert("lock-torture: must run at least one locking thread\n");
		firsterr = -EINVAL;
		goto unwind;
	}

	if (nwriters_stress >= 0)
		cxt.nrealwriters_stress = nwriters_stress;
	else
		cxt.nrealwriters_stress = 2 * num_online_cpus();

	if (cxt.cur_ops->init) {
		cxt.cur_ops->init();
		cxt.init_called = true;
	}

#ifdef CONFIG_DEBUG_MUTEXES
	if (str_has_prefix(torture_type, "mutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_RT_MUTEXES
	if (str_has_prefix(torture_type, "rtmutex"))
		cxt.debug_lock = true;
#endif
#ifdef CONFIG_DEBUG_SPINLOCK
	if ((str_has_prefix(torture_type, "spin")) ||
	    (str_has_prefix(torture_type, "rw_lock")))
		cxt.debug_lock = true;
#endif

	/* Initialize the statistics so that each run gets its own numbers. */
	if (nwriters_stress) {
		lock_is_write_held = false;
		cxt.lwsa = kmalloc_array(cxt.nrealwriters_stress,
					 sizeof(*cxt.lwsa),
					 GFP_KERNEL);
		if (cxt.lwsa == NULL) {
			VERBOSE_TOROUT_STRING("cxt.lwsa: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}

		for (i = 0; i < cxt.nrealwriters_stress; i++) {
			cxt.lwsa[i].n_lock_fail = 0;
			cxt.lwsa[i].n_lock_acquired = 0;
		}
	}

	if (cxt.cur_ops->readlock) {
		if (nreaders_stress >= 0)
			cxt.nrealreaders_stress = nreaders_stress;
		else {
			/*
			 * By default, evenly distribute the number of
			 * readers and writers.  We still run the same total
			 * number of threads as the writer-only locks default.
			 */
			if (nwriters_stress < 0) /* user doesn't care */
				cxt.nrealwriters_stress = num_online_cpus();
			cxt.nrealreaders_stress = cxt.nrealwriters_stress;
		}
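
		/*
		 * For example, on a 16-CPU system with neither parameter
		 * specified, an exclusive-only test runs 32 writers, while
		 * a reader-writer test runs 16 writers and 16 readers.
		 */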

		if (nreaders_stress) {
			cxt.lrsa = kmalloc_array(cxt.nrealreaders_stress,
						 sizeof(*cxt.lrsa),
						 GFP_KERNEL);
			if (cxt.lrsa == NULL) {
				VERBOSE_TOROUT_STRING("cxt.lrsa: Out of memory");
				firsterr = -ENOMEM;
				kfree(cxt.lwsa);
				cxt.lwsa = NULL;
				goto unwind;
			}

			for (i = 0; i < cxt.nrealreaders_stress; i++) {
				cxt.lrsa[i].n_lock_fail = 0;
				cxt.lrsa[i].n_lock_acquired = 0;
			}
		}
	}

	lock_torture_print_module_parms(cxt.cur_ops, "Start of test");

	/* Prepare torture context. */
	if (onoff_interval > 0) {
		firsterr = torture_onoff_init(onoff_holdoff * HZ,
					      onoff_interval * HZ, NULL);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (shutdown_secs > 0) {
		firsterr = torture_shutdown_init(shutdown_secs,
						 lock_torture_cleanup);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter > 0) {
		firsterr = torture_stutter_init(stutter, stutter);
		if (torture_init_error(firsterr))
			goto unwind;
	}

	if (nwriters_stress) {
		writer_tasks = kcalloc(cxt.nrealwriters_stress,
				       sizeof(writer_tasks[0]),
				       GFP_KERNEL);
		if (writer_tasks == NULL) {
			TOROUT_ERRSTRING("writer_tasks: Out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	if (cxt.cur_ops->readlock) {
		reader_tasks = kcalloc(cxt.nrealreaders_stress,
				       sizeof(reader_tasks[0]),
				       GFP_KERNEL);
		if (reader_tasks == NULL) {
			TOROUT_ERRSTRING("reader_tasks: Out of memory");
			kfree(writer_tasks);
			writer_tasks = NULL;
			firsterr = -ENOMEM;
			goto unwind;
		}
	}

	/*
	 * Create the kthreads and start torturing (oh, those poor little locks).
	 *
	 * TODO: Note that we interleave writers with readers, giving writers
	 * a slight advantage by creating their kthreads first.  This could be
	 * modified for very specific needs, or the user could even be allowed
	 * to choose the policy, if ever wanted.
	 */
	for (i = 0, j = 0; i < cxt.nrealwriters_stress ||
		    j < cxt.nrealreaders_stress; i++, j++) {
		if (i >= cxt.nrealwriters_stress)
			goto create_reader;

		/* Create writer. */
		firsterr = torture_create_kthread(lock_torture_writer, &cxt.lwsa[i],
						  writer_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;

	create_reader:
		if (cxt.cur_ops->readlock == NULL || (j >= cxt.nrealreaders_stress))
			continue;
		/* Create reader. */
		firsterr = torture_create_kthread(lock_torture_reader, &cxt.lrsa[j],
						  reader_tasks[j]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(lock_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	lock_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_LOCK_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(lock_torture_init);
module_exit(lock_torture_cleanup);