/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */

#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;             /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;             /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;              /* ->next pointer of last CB. */
        RCU_TRACE(long qlen);                   /* Number of pending CBs. */
        RCU_TRACE(unsigned long gp_start);      /* Start time for stalls. */
        RCU_TRACE(unsigned long ticks_this_gp); /* Statistic for stalls. */
        RCU_TRACE(unsigned long jiffies_stall); /* Jiffies at next stall. */
        RCU_TRACE(const char *name);            /* Name of RCU type. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail       = &rcu_sched_ctrlblk.rcucblist,
        .curtail        = &rcu_sched_ctrlblk.rcucblist,
        RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .donetail       = &rcu_bh_ctrlblk.rcucblist,
        .curtail        = &rcu_bh_ctrlblk.rcucblist,
        RCU_TRACE(.name = "rcu_bh")
};
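
/*
 * Illustrative sketch (not part of this file's build) of how the two tail
 * pointers above are used.  The authoritative code lives in
 * kernel/rcu/tiny.c; roughly, call_rcu() appends a new callback at
 * *->curtail, the end of a grace period advances ->donetail to ->curtail,
 * and callback invocation then detaches everything up to *->donetail:
 *
 *      *rcp->curtail = head;                   // enqueue new callback
 *      rcp->curtail = &head->next;
 *
 *      rcp->donetail = rcp->curtail;           // grace period ended
 *
 *      list = rcp->rcucblist;                  // detach "done" callbacks
 *      rcp->rcucblist = *rcp->donetail;
 *      *rcp->donetail = NULL;
 *      if (rcp->curtail == rcp->donetail)
 *              rcp->curtail = &rcp->rcucblist;
 *      rcp->donetail = &rcp->rcucblist;
 */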

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}
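
/*
 * Note: the lockdep-based RCU debug checks (debug_lockdep_rcu_enabled() in
 * kernel/rcu/update.c, at the time of this writing) consult
 * rcu_scheduler_active, so lockdep splats are suppressed until the flag is
 * set above.
 */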

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
        unsigned long flags;

        local_irq_save(flags);
        rcp->qlen -= n;
        local_irq_restore(flags);
}
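
/*
 * Interrupts are disabled around the subtraction above because ->qlen is
 * also adjusted from call_rcu()/call_rcu_bh(), which may run in interrupt
 * context; the subtraction itself is done by the callback-invocation path
 * in kernel/rcu/tiny.c after a batch of callbacks has been invoked.
 */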

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
        seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
        seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
        return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
        .owner = THIS_MODULE,
        .open = show_tiny_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
        struct dentry *retval;

        rcudir = debugfs_create_dir("rcu", NULL);
        if (!rcudir)
                goto free_out;
        retval = debugfs_create_file("rcudata", 0444, rcudir,
                                     NULL, &show_tiny_stats_fops);
        if (!retval)
                goto free_out;
        return 0;
free_out:
        debugfs_remove_recursive(rcudir);
        return 1;
}
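
/*
 * With CONFIG_RCU_TRACE=y, the statistics exposed above can be read from
 * user space once debugfs is mounted (typically at /sys/kernel/debug), for
 * example:
 *
 *      # cat /sys/kernel/debug/rcu/rcudata
 *      rcu_sched: qlen: 0
 *      rcu_bh: qlen: 0
 *
 * The qlen values shown here are only illustrative.
 */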

static void __exit rcutiny_trace_cleanup(void)
{
        debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
        unsigned long j;
        unsigned long js;

        if (rcu_cpu_stall_suppress)
                return;
        rcp->ticks_this_gp++;
        j = jiffies;
        js = READ_ONCE(rcp->jiffies_stall);
        if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
                pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
                       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
                       jiffies - rcp->gp_start, rcp->qlen);
                dump_stack();
                WRITE_ONCE(rcp->jiffies_stall,
                           jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        } else if (ULONG_CMP_GE(j, js)) {
                WRITE_ONCE(rcp->jiffies_stall,
                           jiffies + rcu_jiffies_till_stall_check());
        }
}
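
/*
 * The deadline comparisons above use ULONG_CMP_GE() rather than a plain
 * ">=" so that they keep working when the jiffies counter wraps.  For
 * example, if ->jiffies_stall was set just before the wrap and jiffies has
 * since wrapped past zero, ULONG_CMP_GE(j, js) still reports that the
 * deadline has passed, whereas "j >= js" would not.
 */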

static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
{
        rcp->ticks_this_gp = 0;
        rcp->gp_start = jiffies;
        WRITE_ONCE(rcp->jiffies_stall,
                   jiffies + rcu_jiffies_till_stall_check());
}

static void check_cpu_stalls(void)
{
        RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
        RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
}

#endif /* #ifdef CONFIG_RCU_TRACE */