/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops.  SMP under Xen is
 * very straightforward.  Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of.  As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/smp.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>

#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/page.h>
#include <xen/events.h>

#include "xen-ops.h"
#include "mmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(int, resched_irq);
static DEFINE_PER_CPU(int, callfunc_irq);
static DEFINE_PER_CPU(int, callfuncsingle_irq);
static DEFINE_PER_CPU(int, debug_irq) = -1;

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);

/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id)
{
#ifdef CONFIG_X86_32
        __get_cpu_var(irq_stat).irq_resched_count++;
#else
        add_pda(irq_resched_count, 1);
#endif

        return IRQ_HANDLED;
}

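/*
 * Early setup run on a freshly-started VCPU: per-cpu init, clock
 * events and the sibling map, then mark the CPU online.
 */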
static __cpuinit void cpu_bringup(void)
{
        int cpu = smp_processor_id();

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        xen_enable_sysenter();
        xen_enable_syscall();

        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        xen_setup_cpu_clockevents();

        cpu_set(cpu, cpu_online_map);
        x86_write_percpu(cpu_state, CPU_ONLINE);
        wmb();

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();

        wmb();          /* make sure everything is out */
}

static __cpuinit void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_idle();
}

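/*
 * Bind this CPU's IPI and debug event channels to IRQ handlers.  On
 * failure, any bindings already made are torn down and the error from
 * bind_*_to_irqhandler() is returned.
 */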
static int xen_smp_intr_init(unsigned int cpu)
{
        int rc;
        const char *resched_name, *callfunc_name, *debug_name;

        resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR,
                                    cpu,
                                    xen_reschedule_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    resched_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(resched_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR,
                                    cpu,
                                    xen_call_function_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(callfunc_irq, cpu) = rc;

        debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu);
        rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, xen_debug_interrupt,
                                     IRQF_DISABLED | IRQF_PERCPU | IRQF_NOBALANCING,
                                     debug_name, NULL);
        if (rc < 0)
                goto fail;
        per_cpu(debug_irq, cpu) = rc;

        callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR,
                                    cpu,
                                    xen_call_function_single_interrupt,
                                    IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(callfuncsingle_irq, cpu) = rc;

        return 0;

 fail:
        if (per_cpu(resched_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        if (per_cpu(callfunc_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        if (per_cpu(debug_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        if (per_cpu(callfuncsingle_irq, cpu) >= 0)
                unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);

        return rc;
}

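/*
 * Ask the hypervisor which VCPUs exist for this domain and mark each
 * one possible; num_processors is bumped for every VCPU found.
 */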
static void __init xen_fill_possible_map(void)
{
        int i, rc;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        cpu_set(i, cpu_possible_map);
                }
        }
}

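/* Prepare CPU 0: native setup plus Xen-specific vcpu_info placement. */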
static void __init xen_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        /* We've switched to the "real" per-cpu gdt, so make sure the
           old memory can be recycled */
        make_lowmem_page_readwrite(&per_cpu_var(gdt_page));

        xen_setup_vcpu_info_placement();
}

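/*
 * Called from the generic SMP code before the secondary CPUs are
 * started: set up CPU 0's interrupts, trim cpu_possible_map down to
 * max_cpus, and mark the remaining CPUs present.
 */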
static void __init xen_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;

        xen_init_lock_cpu(0);

        smp_store_cpu_info(0);
        cpu_data(0).x86_max_cores = 1;
        set_cpu_sibling_map(0);

        if (xen_smp_intr_init(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                cpu_clear(cpu, cpu_possible_map);
        }

        for_each_possible_cpu (cpu) {
                struct task_struct *idle;

                if (cpu == 0)
                        continue;

                idle = fork_idle(cpu);
                if (IS_ERR(idle))
                        panic("failed fork for CPU %d", cpu);

                cpu_set(cpu, cpu_present_map);
        }
}

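/*
 * Build the initial register and descriptor state for a new VCPU and
 * hand it to the hypervisor with VCPUOP_initialise.  The new VCPU
 * starts life in cpu_bringup_and_idle().
 */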
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;

        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_table(cpu);

        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);
        make_lowmem_page_readonly(gdt);

        ctxt->gdt_frames[0] = virt_to_mfn(gdt);
        ctxt->gdt_ents = GDT_ENTRIES;

        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs = __KERNEL_CS;
        ctxt->failsafe_callback_cs = __KERNEL_CS;
#endif
        ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}

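/*
 * Bring a secondary CPU up: set up its per-cpu data, timer and lock
 * state, load its initial context, then ask the hypervisor to start
 * the VCPU and wait for it to mark itself online.
 */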
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
        struct task_struct *idle = idle_task(cpu);
        int rc;

#ifdef CONFIG_X86_64
        /* Allocate node local memory for AP pdas */
        WARN_ON(cpu == 0);
        if (cpu > 0) {
                rc = get_local_pda(cpu);
                if (rc)
                        return rc;
        }
#endif

#ifdef CONFIG_X86_32
        init_gdt(cpu);
        per_cpu(current_task, cpu) = idle;
        irq_ctx_init(cpu);
#else
        cpu_pda(cpu)->pcurrent = idle;
        clear_tsk_thread_flag(idle, TIF_FORK);
#endif
        xen_setup_timer(cpu);
        xen_init_lock_cpu(cpu);

        per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        if (num_online_cpus() == 1)
                alternatives_smp_switch(1);

        rc = xen_smp_intr_init(cpu);
        if (rc)
                return rc;

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
        BUG_ON(rc);

        while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
                HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                barrier();
        }

        return 0;
}

static void xen_smp_cpus_done(unsigned int max_cpus)
{
}

#ifdef CONFIG_HOTPLUG_CPU
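/* The boot CPU can't be unplugged; anything else can. */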
static int xen_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

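/*
 * Wait for the hypervisor to report the VCPU as down, then release
 * its IRQ bindings, spinlock state and timer.
 */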
static void xen_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
                current->state = TASK_UNINTERRUPTIBLE;
                schedule_timeout(HZ/10);
        }
        unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
        unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
        xen_uninit_lock_cpu(cpu);
        xen_teardown_timer(cpu);

        if (num_online_cpus() == 1)
                alternatives_smp_switch(0);
}

static void __cpuinit xen_play_dead(void) /* used only with CPU_HOTPLUG */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
        cpu_bringup();
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_play_dead(void)
{
        BUG();
}

#endif
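
/* Take the calling VCPU down; used from xen_smp_send_stop() via IPI. */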
static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
        BUG();
}

static void xen_smp_send_stop(void)
{
        smp_call_function(stop_self, NULL, 0);
}

static void xen_smp_send_reschedule(int cpu)
{
        xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
}

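/* Deliver an IPI to every online CPU in the mask, one event at a time. */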
static void xen_send_IPI_mask(const struct cpumask *mask,
                              enum ipi_vector vector)
{
        unsigned cpu;

        for_each_cpu_and(cpu, mask, cpu_online_mask)
                xen_send_IPI_one(cpu, vector);
}

static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
{
        int cpu;

        xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);

        /* Make sure other vcpus get a chance to run if they need to. */
        for_each_cpu(cpu, mask) {
                if (xen_vcpu_stolen(cpu)) {
                        HYPERVISOR_sched_op(SCHEDOP_yield, 0);
                        break;
                }
        }
}

static void xen_smp_send_call_function_single_ipi(int cpu)
{
        xen_send_IPI_mask(cpumask_of(cpu),
                          XEN_CALL_FUNCTION_SINGLE_VECTOR);
}

static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_interrupt();
#ifdef CONFIG_X86_32
        __get_cpu_var(irq_stat).irq_call_count++;
#else
        add_pda(irq_call_count, 1);
#endif
        irq_exit();

        return IRQ_HANDLED;
}

static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
#ifdef CONFIG_X86_32
        __get_cpu_var(irq_stat).irq_call_count++;
#else
        add_pda(irq_call_count, 1);
#endif
        irq_exit();

        return IRQ_HANDLED;
}

static const struct smp_ops xen_smp_ops __initdata = {
        .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_cpu_up,
        .cpu_die = xen_cpu_die,
        .cpu_disable = xen_cpu_disable,
        .play_dead = xen_play_dead,

        .smp_send_stop = xen_smp_send_stop,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
        xen_init_spinlocks();
}