1 /*
2 * Xen Event Channels (internal header)
3 *
4 * Copyright (C) 2013 Citrix Systems R&D Ltd.
5 *
6 * This source code is licensed under the GNU General Public License,
7 * Version 2 or later. See the file COPYING for more details.
8 */
9 #ifndef __EVENTS_INTERNAL_H__
10 #define __EVENTS_INTERNAL_H__
11
12 /* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,	/* IRQ not bound to any event source */
	IRQT_PIRQ,		/* bound to a physical IRQ / GSI (passthrough) */
	IRQT_VIRQ,		/* bound to a virtual IRQ (virq number) */
	IRQT_IPI,		/* bound to an inter-processor interrupt vector */
	IRQT_EVTCHN		/* bound to a plain interdomain event channel */
};
20
21 /*
22 * Packed IRQ information:
23 * type - enum xen_irq_type
24 * event channel - irq->event channel mapping
25 * cpu - cpu this event channel is bound to
26 * index - type-specific information:
 * PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
28 * guest, or GSI (real passthrough IRQ) of the device.
29 * VIRQ - virq number
30 * IPI - IPI vector
31 * EVTCHN -
32 */
struct irq_info {
	struct list_head list;		/* list membership — presumably the global
					 * irq_info list; verify in events_base.c */
	struct list_head eoi_list;	/* membership in the delayed-EOI list */
	short refcnt;			/* reference count for this binding */
	short spurious_cnt;		/* spurious-event counter — NOTE(review):
					 * exact throttling semantics live in the
					 * .c file; confirm there */
	enum xen_irq_type type;		/* type */
	unsigned irq;			/* Linux IRQ number */
	unsigned int evtchn;		/* event channel */
	unsigned short cpu;		/* cpu bound */
	unsigned short eoi_cpu;		/* EOI must happen on this cpu */
	unsigned int irq_epoch;		/* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;			/* Time in jiffies when to EOI. */

	/* Type-specific data, interpreted according to 'type' (see the
	 * "index" description in the comment above). */
	union {
		unsigned short virq;		/* IRQT_VIRQ: virq number */
		enum ipi_vector ipi;		/* IRQT_IPI: IPI vector */
		struct {
			unsigned short pirq;	/* physical IRQ number */
			unsigned short gsi;	/* GSI of the passed-through device */
			unsigned char vector;	/* interrupt vector */
			unsigned char flags;	/* PIRQ_* flags (defined below) */
			uint16_t domid;		/* owning domain id */
		} pirq;				/* IRQT_PIRQ */
	} u;
};
58
59 #define PIRQ_NEEDS_EOI (1 << 0)
60 #define PIRQ_SHAREABLE (1 << 1)
61 #define PIRQ_MSI_GROUP (1 << 2)
62
63 struct evtchn_loop_ctrl;
64
/*
 * Operations table abstracting the event-channel ABI backend
 * (2-level or FIFO — see xen_evtchn_2l_init()/xen_evtchn_fifo_init()).
 */
struct evtchn_ops {
	/* Maximum number of event channels the ABI can support. */
	unsigned (*max_channels)(void);
	/* Number of event channels currently available. */
	unsigned (*nr_channels)(void);

	/* ABI-specific setup for a bound channel; may be NULL
	 * (see xen_evtchn_port_setup()). */
	int (*setup)(struct irq_info *info);
	/* Rebind the channel in @info to @cpu. */
	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

	void (*clear_pending)(unsigned port);
	void (*set_pending)(unsigned port);
	bool (*is_pending)(unsigned port);
	/* Mask @port and return its previous mask state — assumed to be
	 * atomic in both backends; TODO confirm. */
	bool (*test_and_set_mask)(unsigned port);
	void (*mask)(unsigned port);
	void (*unmask)(unsigned port);

	/* Scan for and dispatch pending events targeted at @cpu. */
	void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
	/* Re-initialise ABI state after resume; may be NULL
	 * (see xen_evtchn_resume()). */
	void (*resume)(void);

	int (*percpu_init)(unsigned int cpu);
	int (*percpu_deinit)(unsigned int cpu);
};
85
86 extern const struct evtchn_ops *evtchn_ops;
87
88 extern int **evtchn_to_irq;
89 int get_evtchn_to_irq(unsigned int evtchn);
90 void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
91
92 struct irq_info *info_for_irq(unsigned irq);
93 unsigned cpu_from_irq(unsigned irq);
94 unsigned cpu_from_evtchn(unsigned int evtchn);
95
xen_evtchn_max_channels(void)96 static inline unsigned xen_evtchn_max_channels(void)
97 {
98 return evtchn_ops->max_channels();
99 }
100
101 /*
102 * Do any ABI specific setup for a bound event channel before it can
103 * be unmasked and used.
104 */
xen_evtchn_port_setup(struct irq_info * info)105 static inline int xen_evtchn_port_setup(struct irq_info *info)
106 {
107 if (evtchn_ops->setup)
108 return evtchn_ops->setup(info);
109 return 0;
110 }
111
xen_evtchn_port_bind_to_cpu(struct irq_info * info,unsigned cpu)112 static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
113 unsigned cpu)
114 {
115 evtchn_ops->bind_to_cpu(info, cpu);
116 }
117
clear_evtchn(unsigned port)118 static inline void clear_evtchn(unsigned port)
119 {
120 evtchn_ops->clear_pending(port);
121 }
122
set_evtchn(unsigned port)123 static inline void set_evtchn(unsigned port)
124 {
125 evtchn_ops->set_pending(port);
126 }
127
test_evtchn(unsigned port)128 static inline bool test_evtchn(unsigned port)
129 {
130 return evtchn_ops->is_pending(port);
131 }
132
test_and_set_mask(unsigned port)133 static inline bool test_and_set_mask(unsigned port)
134 {
135 return evtchn_ops->test_and_set_mask(port);
136 }
137
mask_evtchn(unsigned port)138 static inline void mask_evtchn(unsigned port)
139 {
140 return evtchn_ops->mask(port);
141 }
142
unmask_evtchn(unsigned port)143 static inline void unmask_evtchn(unsigned port)
144 {
145 return evtchn_ops->unmask(port);
146 }
147
xen_evtchn_handle_events(unsigned cpu,struct evtchn_loop_ctrl * ctrl)148 static inline void xen_evtchn_handle_events(unsigned cpu,
149 struct evtchn_loop_ctrl *ctrl)
150 {
151 return evtchn_ops->handle_events(cpu, ctrl);
152 }
153
xen_evtchn_resume(void)154 static inline void xen_evtchn_resume(void)
155 {
156 if (evtchn_ops->resume)
157 evtchn_ops->resume();
158 }
159
160 void xen_evtchn_2l_init(void);
161 int xen_evtchn_fifo_init(void);
162
163 #endif /* #ifndef __EVENTS_INTERNAL_H__ */
164