// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
	pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
	pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)
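
/*
 * For example, with the default scale_type of "rcu",
 * SCALEOUT_STRING("Test complete") prints "rcu-scale: Test complete"
 * and SCALEOUT_ERRSTRING("out of memory") prints
 * "rcu-scale:!!! out of memory".
 */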

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuscale.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
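
/*
 * As a concrete (hypothetical) example of use case 2 above, an
 * update-only test of SRCU on a four-CPU system might boot with:
 *
 *	nr_cpus=4 rcuscale.scale_type=srcu rcuscale.nreaders=0
 */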

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
	      "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

#define MAX_MEAS 10000
#define MIN_MEAS 100
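
/*
 * Bounds on per-writer measurements: each writer records at most
 * MAX_MEAS grace-period durations, and declares itself done once it
 * has collected at least MIN_MEAS of them.
 */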

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
	.ptype = RCU_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = rcu_scale_read_lock,
	.readunlock = rcu_scale_read_unlock,
	.get_gp_seq = rcu_get_gp_seq,
	.gp_diff = rcu_seq_diff,
	.exp_completed = rcu_exp_batches_completed,
	.async = call_rcu,
	.gp_barrier = rcu_barrier,
	.sync = synchronize_rcu,
	.exp_sync = synchronize_rcu_expedited,
	.name = "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
	.ptype = SRCU_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = srcu_scale_read_lock,
	.readunlock = srcu_scale_read_unlock,
	.get_gp_seq = srcu_scale_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_scale_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_scale_synchronize,
	.exp_sync = srcu_scale_synchronize_expedited,
	.name = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
	.ptype = SRCU_FLAVOR,
	.init = srcu_sync_scale_init,
	.cleanup = srcu_sync_scale_cleanup,
	.readlock = srcu_scale_read_lock,
	.readunlock = srcu_scale_read_unlock,
	.get_gp_seq = srcu_scale_completed,
	.gp_diff = rcu_seq_diff,
	.exp_completed = srcu_scale_completed,
	.async = srcu_call_rcu,
	.gp_barrier = srcu_rcu_barrier,
	.sync = srcu_scale_synchronize,
	.exp_sync = srcu_scale_synchronize_expedited,
	.name = "srcud"
};

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
	return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}
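
/*
 * RCU-tasks provides no explicit read-side markers, hence the empty
 * readlock/readunlock functions above; its quiescent states are instead
 * voluntary context switches, userspace execution, and the idle loop.
 */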

static struct rcu_scale_ops tasks_ops = {
	.ptype = RCU_TASKS_FLAVOR,
	.init = rcu_sync_scale_init,
	.readlock = tasks_scale_read_lock,
	.readunlock = tasks_scale_read_unlock,
	.get_gp_seq = rcu_no_completed,
	.gp_diff = rcu_seq_diff,
	.async = call_rcu_tasks,
	.gp_barrier = rcu_barrier_tasks,
	.sync = synchronize_rcu_tasks,
	.exp_sync = synchronize_rcu_tasks,
	.name = "tasks"
};

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU scalability reader kthread.  Repeatedly does empty RCU read-side
 * critical sections, minimizing update-side interference.  However, the
 * point of this test is not to evaluate reader scalability, but instead
 * to serve as a test load for update-side scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_scale_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_scale_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sched_set_fifo_low(current);

	if (holdoff)
		schedule_timeout_idle(holdoff * HZ);

	/*
	 * Wait until rcu_end_inkernel_boot() is called for normal GP tests
	 * so that RCU is not always expedited for normal GP tests.
	 * The system_state test is approximate, but works well in practice.
	 */
	while (!gp_exp && system_state != SYSTEM_RUNNING)
		schedule_timeout_uninterruptible(1);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
		t_rcu_scale_writer_started = t;
		if (gp_exp) {
			b_rcu_gp_test_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_scale_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			cur_ops->exp_sync();
		} else {
			cur_ops->sync();
		}
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sched_set_normal(current, 0);
			pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
				 scale_type, SCALE_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				SCALEOUT_STRING("Test complete");
				t_rcu_scale_writer_finished = t;
				if (gp_exp) {
					b_rcu_gp_test_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_gp_test_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_scale_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		cur_ops->gp_barrier();
	}
	writer_n_durations[me] = i_max + 1;
	torture_kthread_stopping("rcu_scale_writer");
	return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
	pr_alert("%s" SCALE_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 scale_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
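
/*
 * For example, assuming eight online CPUs: compute_real(4) returns 4,
 * compute_real(-1) returns 8, and compute_real(-3) returns 6.  Very
 * negative values are clamped so that at least one kthread is created.
 */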

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs for
 * a number of iterations and measure the total time and number of grace
 * periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
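
/*
 * A hypothetical module invocation of this kfree_rcu() test, using
 * double-sized objects and more loops than the defaults:
 *
 *	modprobe rcuscale kfree_rcu_test=1 kfree_mult=2 kfree_loops=100
 */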

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;

struct kfree_obj {
	char kfree_obj[8];
	struct rcu_head rh;
};
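
/*
 * The rcu_head must be embedded in kfree_obj so that kfree_rcu() can
 * find it; kfree_mult then scales each allocation to a multiple of this
 * structure's size (24 bytes given 8-byte pointers and no padding).
 */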

static int
kfree_scale_thread(void *arg)
{
	int i, loop = 0;
	long me = (long)arg;
	struct kfree_obj *alloc_ptr;
	u64 start_time, end_time;
	long long mem_begin, mem_during = 0;

	VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);

	start_time = ktime_get_mono_fast_ns();

	if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
		if (gp_exp)
			b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_started = cur_ops->get_gp_seq();
	}

	do {
		if (!mem_during) {
			mem_during = mem_begin = si_mem_available();
		} else if (loop % (kfree_loops / 4) == 0) {
			mem_during = (mem_during + si_mem_available()) / 2;
		}

		for (i = 0; i < kfree_alloc_num; i++) {
			alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
			if (!alloc_ptr)
				return -ENOMEM;

			kfree_rcu(alloc_ptr, rh);
		}

		cond_resched();
	} while (!torture_must_stop() && ++loop < kfree_loops);

	if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
		end_time = ktime_get_mono_fast_ns();

		if (gp_exp)
			b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
		else
			b_rcu_gp_test_finished = cur_ops->get_gp_seq();

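		/*
		 * si_mem_available() reports free pages, so the shift below
		 * converts a page count to mebibytes: with the common 4-KiB
		 * page size (PAGE_SHIFT == 12), this is a right shift of 8.
		 */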
		pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
			 (unsigned long long)(end_time - start_time), kfree_loops,
			 rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
			 (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

		if (shutdown) {
			smp_mb(); /* Assign before wake. */
			wake_up(&shutdown_wq);
		}
	}

	torture_kthread_stopping("kfree_scale_thread");
	return 0;
}

static void
kfree_scale_cleanup(void)
{
	int i;

	if (torture_cleanup_begin())
		return;

	if (kfree_reader_tasks) {
		for (i = 0; i < kfree_nrealthreads; i++)
			torture_stop_kthread(kfree_scale_thread,
					     kfree_reader_tasks[i]);
		kfree(kfree_reader_tasks);
	}

	torture_cleanup_end();
}

/*
 * shutdown kthread.  Just waits to be awakened, then shuts down system.
 */
static int
kfree_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq,
			atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

	smp_mb(); /* Wake before output. */

	kfree_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
kfree_scale_init(void)
{
	long i;
	int firsterr = 0;

	kfree_nrealthreads = compute_real(kfree_nthreads);
	/* Start up the kthreads. */
	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}

	pr_alert("kfree object size=%zu\n", kfree_mult * sizeof(struct kfree_obj));

	kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
				     GFP_KERNEL);
	if (kfree_reader_tasks == NULL) {
		firsterr = -ENOMEM;
		goto unwind;
	}

	for (i = 0; i < kfree_nrealthreads; i++) {
		firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
						  kfree_reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}

	while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
		schedule_timeout_uninterruptible(1);

	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	kfree_scale_cleanup();
	return firsterr;
}

static void
rcu_scale_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (kfree_rcu_test) {
		kfree_scale_cleanup();
		return;
	}

	if (torture_cleanup_begin())
		return;
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_scale_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_scale_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 scale_type, SCALE_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 scale_type, SCALE_FLAG,
			 t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
			 t_rcu_scale_writer_finished -
			 t_rcu_scale_writer_started,
			 ngps,
			 rcuscale_seq_diff(b_rcu_gp_test_finished,
					   b_rcu_gp_test_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j < writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 scale_type, SCALE_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then shuts
 * down system.
 */
static int
rcu_scale_shutdown(void *arg)
{
	wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_scale_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_scale_ops *scale_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(scale_type, verbose))
		return -EBUSY;

	/* Process args and announce that the scalability'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
		cur_ops = scale_ops[i];
		if (strcmp(scale_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(scale_ops)) {
		pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
		pr_alert("rcu-scale types:");
		for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
			pr_cont(" %s", scale_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	if (kfree_rcu_test)
		return kfree_scale_init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_scale_reader_started, 0);
	atomic_set(&n_rcu_scale_writer_started, 0);
	atomic_set(&n_rcu_scale_writer_finished, 0);
	rcu_scale_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		SCALEOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_scale_cleanup();
	return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);