#include <linux/hardirq.h>

#include <asm/x86_init.h>

#include <xen/interface/xen.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/features.h>
#include <xen/events.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include "xen-ops.h"

/*
 * Force a proper event-channel callback from Xen after clearing the
 * callback mask. We do this in a very simple manner, by making a call
 * down into Xen. The pending flag will be checked by Xen on return.
 */
void xen_force_evtchn_callback(void)
{
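	/*
	 * Any cheap, side-effect-free hypercall will do here: what
	 * matters is the trap into Xen, which checks
	 * evtchn_upcall_pending on the return path and delivers the
	 * upcall before resuming the guest.
	 */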
	(void)HYPERVISOR_xen_version(0, NULL);
}

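/*
 * A PV guest's virtual "interrupts enabled" state lives in
 * vcpu_info->evtchn_upcall_mask rather than in the real EFLAGS.IF, so
 * save_fl/restore_fl/irq_disable/irq_enable below operate on that
 * per-vcpu mask instead of using pushf/popf/cli/sti.
 */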
asmlinkage __visible unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = this_cpu_read(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
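	/* Callers expect a native_save_fl()-style EFLAGS value, so
	   report the state through the IF bit only. */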
	return (-flags) & X86_EFLAGS_IF;
}
PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);

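/* The inverse of xen_save_fl(): only the X86_EFLAGS_IF bit of the
   argument is examined; mask = 1 means event delivery is disabled. */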
__visible void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* See xen_irq_enable() for why preemption must be disabled. */
	preempt_disable();
	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;

	if (flags == 0) {
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			xen_force_evtchn_callback();
		preempt_enable();
	} else
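		/* Events are still masked, so don't enter the
		   scheduler here; any pending reschedule will be
		   handled once events are unmasked again. */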
		preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);

asmlinkage __visible void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	this_cpu_read(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);

asmlinkage __visible void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/*
	 * We may be preempted as soon as vcpu->evtchn_upcall_mask is
	 * cleared, so disable preemption to ensure we check for
	 * events on the VCPU we are still running on.
	 */
	preempt_disable();

	vcpu = this_cpu_read(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		xen_force_evtchn_callback();

	preempt_enable();
}
PV_CALLEE_SAVE_REGS_THUNK(xen_irq_enable);

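/*
 * PV replacement for the native "sti; hlt" sequence: SCHEDOP_block
 * atomically unmasks event delivery and blocks this vcpu until an
 * event is pending.
 */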
static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0)
		BUG();
}

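/*
 * PV "hlt": with events masked we cannot block waiting for one, so
 * take the vcpu offline with VCPUOP_down instead; otherwise fall back
 * to the blocking variant above.
 */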
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

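/*
 * These entries are invoked from the local_irq_*() fast paths via the
 * paravirt call machinery. PV_CALLEE_SAVE points at the
 * register-preserving thunks generated by PV_CALLEE_SAVE_REGS_THUNK
 * above, which keeps those very hot call sites from having to spill
 * the full C calling convention.
 */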
static const struct pv_irq_ops xen_irq_ops __initconst = {
	.save_fl = PV_CALLEE_SAVE(xen_save_fl),
	.restore_fl = PV_CALLEE_SAVE(xen_restore_fl),
	.irq_disable = PV_CALLEE_SAVE(xen_irq_disable),
	.irq_enable = PV_CALLEE_SAVE(xen_irq_enable),

	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
#ifdef CONFIG_X86_64
	.adjust_exception_frame = xen_adjust_exception_frame,
#endif
};

void __init xen_init_irq_ops(void)
{
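	/*
	 * PVH guests receive event-channel upcalls through the HVM
	 * callback vector and control interrupts with the real
	 * EFLAGS.IF, which is why the default pv_irq_ops are kept.
	 */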
	/* For PVH we use default pv_irq_ops settings. */
	if (!xen_feature(XENFEAT_hvm_callback_vector))
		pv_irq_ops = xen_irq_ops;
	x86_init.irqs.intr_init = xen_init_IRQ;
}