/*
 * Machine check injection support.
 * Copyright 2008 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * Authors:
 * Andi Kleen
 * Ying Huang
 */
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/fs.h>
#include <linux/preempt.h>
#include <linux/smp.h>
#include <linux/notifier.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/gfp.h>
#include <asm/mce.h>
#include <asm/apic.h>
#include <asm/nmi.h>

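/*
 * Overview: a struct mce record written to the mce character device
 * (see mce_write() below) is stored per CPU by inject_mce() and then
 * delivered by raise_mce() as an exception, a poll, or a broadcast to
 * several CPUs, depending on m->inject_flags.
 */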
/* Update fake mce registers on current CPU. */
static void inject_mce(struct mce *m)
{
        struct mce *i = &per_cpu(injectm, m->extcpu);

        /* Make sure no one reads partially written injectm */
        i->finished = 0;
        mb();
        m->finished = 0;
        /* First set the fields after finished */
        i->extcpu = m->extcpu;
        mb();
        /* Now write record in order, finished last (except above) */
        memcpy(i, m, sizeof(struct mce));
        /* Finally activate it */
        mb();
        i->finished = 1;
}

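/*
 * Run the machine check poll handler over all banks of this CPU with
 * interrupts disabled, then mark the injected record as consumed.
 */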
static void raise_poll(struct mce *m)
{
        unsigned long flags;
        mce_banks_t b;

        memset(&b, 0xff, sizeof(mce_banks_t));
        local_irq_save(flags);
        machine_check_poll(0, &b);
        local_irq_restore(flags);
        m->finished = 0;
}

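/*
 * Call the machine check exception handler directly. When no pt_regs
 * is supplied, fake one from the ip/cs saved in the injected record.
 */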
static void raise_exception(struct mce *m, struct pt_regs *pregs)
{
        struct pt_regs regs;
        unsigned long flags;

        if (!pregs) {
                memset(&regs, 0, sizeof(struct pt_regs));
                regs.ip = m->ip;
                regs.cs = m->cs;
                pregs = &regs;
        }
        /* The machine check exception handler runs with interrupts disabled */
        local_irq_save(flags);
        do_machine_check(pregs, 0);
        local_irq_restore(flags);
        m->finished = 0;
}

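/*
 * mce_inject_cpumask tracks the CPUs that still have to consume a
 * broadcast injection; mce_inject_mutex serializes injections coming
 * in through mce_write().
 */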
static cpumask_var_t mce_inject_cpumask;
static DEFINE_MUTEX(mce_inject_mutex);

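/*
 * NMI handler for broadcast injection: if this CPU is a target, take it
 * out of the pending mask and raise the injected exception or poll.
 */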
static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs)
{
        int cpu = smp_processor_id();
        struct mce *m = &__get_cpu_var(injectm);
        if (!cpumask_test_cpu(cpu, mce_inject_cpumask))
                return NMI_DONE;
        cpumask_clear_cpu(cpu, mce_inject_cpumask);
        if (m->inject_flags & MCJ_EXCEPTION)
                raise_exception(m, regs);
        else if (m->status)
                raise_poll(m);
        return NMI_HANDLED;
}

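/*
 * IPI handler for IRQ-context broadcast: raise the injected exception
 * on this CPU if it is still marked as a target.
 */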
static void mce_irq_ipi(void *info)
{
        int cpu = smp_processor_id();
        struct mce *m = &__get_cpu_var(injectm);

        if (cpumask_test_cpu(cpu, mce_inject_cpumask) &&
            m->inject_flags & MCJ_EXCEPTION) {
                cpumask_clear_cpu(cpu, mce_inject_cpumask);
                raise_exception(m, NULL);
        }
}

/* Inject mce on current CPU */
static int raise_local(void)
{
        struct mce *m = &__get_cpu_var(injectm);
        int context = MCJ_CTX(m->inject_flags);
        int ret = 0;
        int cpu = m->extcpu;

        if (m->inject_flags & MCJ_EXCEPTION) {
                printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu);
                switch (context) {
                case MCJ_CTX_IRQ:
                        /*
                         * Could do more to fake interrupts like
                         * calling irq_enter, but the necessary
                         * machinery isn't exported currently.
                         */
                        /*FALL THROUGH*/
                case MCJ_CTX_PROCESS:
                        raise_exception(m, NULL);
                        break;
                default:
                        printk(KERN_INFO "Invalid MCE context\n");
                        ret = -EINVAL;
                }
                printk(KERN_INFO "MCE exception done on CPU %d\n", cpu);
        } else if (m->status) {
                printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu);
                raise_poll(m);
                mce_notify_irq();
                printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu);
        } else
                m->finished = 0;

        return ret;
}

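/*
 * Deliver an injected record: optionally broadcast it to the other CPUs
 * by IPI or NMI first, then raise it on the local CPU. Records injected
 * with MCJ_CTX_RANDOM are only stored here and get picked up by a later
 * broadcast injection.
 */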
static void raise_mce(struct mce *m)
{
        int context = MCJ_CTX(m->inject_flags);

        inject_mce(m);

        if (context == MCJ_CTX_RANDOM)
                return;

#ifdef CONFIG_X86_LOCAL_APIC
        if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) {
                unsigned long start;
                int cpu;

                get_online_cpus();
                cpumask_copy(mce_inject_cpumask, cpu_online_mask);
                cpumask_clear_cpu(get_cpu(), mce_inject_cpumask);
                for_each_online_cpu(cpu) {
                        struct mce *mcpu = &per_cpu(injectm, cpu);
                        if (!mcpu->finished ||
                            MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM)
                                cpumask_clear_cpu(cpu, mce_inject_cpumask);
                }
                if (!cpumask_empty(mce_inject_cpumask)) {
                        if (m->inject_flags & MCJ_IRQ_BRAODCAST) {
                                /*
                                 * Don't wait for completion: mce_irq_ipi()
                                 * has to run in parallel with the
                                 * raise_local() below so the handlers
                                 * stay in sync.
                                 */
                                preempt_disable();
                                smp_call_function_many(mce_inject_cpumask,
                                                       mce_irq_ipi, NULL, 0);
                                preempt_enable();
                        } else if (m->inject_flags & MCJ_NMI_BROADCAST)
                                apic->send_IPI_mask(mce_inject_cpumask,
                                                    NMI_VECTOR);
                }
                start = jiffies;
                while (!cpumask_empty(mce_inject_cpumask)) {
                        if (!time_before(jiffies, start + 2*HZ)) {
                                printk(KERN_ERR
                                "Timeout waiting for mce inject %lx\n",
                                        *cpumask_bits(mce_inject_cpumask));
                                break;
                        }
                        cpu_relax();
                }
                raise_local();
                put_cpu();
                put_online_cpus();
        } else
#endif
        {
                preempt_disable();
                raise_local();
                preempt_enable();
        }
}

/* Error injection interface */
static ssize_t mce_write(struct file *filp, const char __user *ubuf,
                         size_t usize, loff_t *off)
{
        struct mce m;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
        /*
         * There are some cases where real MSR reads could slip
         * through.
         */
        if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA))
                return -EIO;

        if ((unsigned long)usize > sizeof(struct mce))
                usize = sizeof(struct mce);
        if (copy_from_user(&m, ubuf, usize))
                return -EFAULT;

        if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu))
                return -EINVAL;

        /*
         * Need to give user space some time to set everything up,
         * so do it a jiffy or two later everywhere.
         */
        schedule_timeout(2);

        mutex_lock(&mce_inject_mutex);
        raise_mce(&m);
        mutex_unlock(&mce_inject_mutex);
        return usize;
}

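/*
 * Module init: register the mce character device write callback and the
 * NMI handler used for broadcast injection.
 */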
static int inject_init(void)
{
        if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
                return -ENOMEM;
        printk(KERN_INFO "Machine check injector initialized\n");
        register_mce_write_callback(mce_write);
        register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0,
                                "mce_notify");
        return 0;
}

module_init(inject_init);
/*
 * Cannot tolerate unloading currently because we cannot
 * guarantee all openers of mce_chrdev will get a reference to us.
 */
MODULE_LICENSE("GPL");