/*
 * Xen Event Channels (internal header)
 *
 * Copyright (C) 2013 Citrix Systems R&D Ltd.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2 or later.  See the file COPYING for more details.
 */
#ifndef __EVENTS_INTERNAL_H__
#define __EVENTS_INTERNAL_H__

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
	short refcnt;
	short spurious_cnt;
	short type;		/* type */
	u8 mask_reason;		/* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;		/* Is event just being handled? */
	unsigned irq;
	unsigned int evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */
	unsigned short eoi_cpu;	/* EOI must happen on this cpu */
	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;		/* Time in jiffies when to EOI. */
	raw_spinlock_t lock;

	union {
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)

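/*
 * Example (illustrative sketch only, not part of this header): how the
 * type-specific union in struct irq_info is typically read.  The helper
 * name below is hypothetical; the real accessors live in events_base.c.
 *
 *	static unsigned int pirq_from_info(const struct irq_info *info)
 *	{
 *		if (info->type != IRQT_PIRQ)
 *			return 0;
 *		return info->u.pirq.pirq;
 *	}
 *
 * For IRQT_VIRQ the payload is info->u.virq, for IRQT_IPI it is
 * info->u.ipi, and info->u.pirq.flags carries the PIRQ_* bits defined
 * above (e.g. PIRQ_NEEDS_EOI).
 */
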
struct evtchn_loop_ctrl;

struct evtchn_ops {
	unsigned (*max_channels)(void);
	unsigned (*nr_channels)(void);

	int (*setup)(struct irq_info *info);
	void (*remove)(evtchn_port_t port, unsigned int cpu);
	void (*bind_to_cpu)(struct irq_info *info, unsigned cpu);

	void (*clear_pending)(unsigned port);
	void (*set_pending)(unsigned port);
	bool (*is_pending)(unsigned port);
	void (*mask)(unsigned port);
	void (*unmask)(unsigned port);

	void (*handle_events)(unsigned cpu, struct evtchn_loop_ctrl *ctrl);
	void (*resume)(void);

	int (*percpu_init)(unsigned int cpu);
	int (*percpu_deinit)(unsigned int cpu);
};
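
/*
 * Example (illustrative sketch only): an ABI backend provides one
 * instance of struct evtchn_ops and the core dispatches through the
 * evtchn_ops pointer declared below.  The names here are hypothetical;
 * see events_2l.c and events_fifo.c for the real backends.
 *
 *	static const struct evtchn_ops evtchn_ops_example = {
 *		.max_channels	= example_max_channels,
 *		.nr_channels	= example_nr_channels,
 *		.bind_to_cpu	= example_bind_to_cpu,
 *		.clear_pending	= example_clear_pending,
 *		.set_pending	= example_set_pending,
 *		.is_pending	= example_is_pending,
 *		.mask		= example_mask,
 *		.unmask		= example_unmask,
 *		.handle_events	= example_handle_events,
 *	};
 *
 *	void example_init(void)
 *	{
 *		evtchn_ops = &evtchn_ops_example;
 *	}
 *
 * The setup, remove and resume hooks may be left NULL; the inline
 * wrappers further down check for that before calling them.
 */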

extern const struct evtchn_ops *evtchn_ops;

extern int **evtchn_to_irq;
int get_evtchn_to_irq(unsigned int evtchn);
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);

struct irq_info *info_for_irq(unsigned irq);
unsigned cpu_from_irq(unsigned irq);
unsigned cpu_from_evtchn(unsigned int evtchn);

static inline unsigned xen_evtchn_max_channels(void)
{
	return evtchn_ops->max_channels();
}

/*
 * Do any ABI specific setup for a bound event channel before it can
 * be unmasked and used.
 */
static inline int xen_evtchn_port_setup(struct irq_info *info)
{
	if (evtchn_ops->setup)
		return evtchn_ops->setup(info);
	return 0;
}
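
/*
 * Example (illustrative sketch only): the setup hook is meant to run
 * after an event channel has been bound and before it is unmasked.
 * The function below is hypothetical; the real call sites are in
 * events_base.c.
 *
 *	static int example_bind(struct irq_info *info, evtchn_port_t evtchn)
 *	{
 *		int ret;
 *
 *		info->evtchn = evtchn;
 *		ret = xen_evtchn_port_setup(info);
 *		if (ret)
 *			return ret;
 *		unmask_evtchn(evtchn);
 *		return 0;
 *	}
 */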

static inline void xen_evtchn_port_remove(evtchn_port_t evtchn,
					  unsigned int cpu)
{
	if (evtchn_ops->remove)
		evtchn_ops->remove(evtchn, cpu);
}

static inline void xen_evtchn_port_bind_to_cpu(struct irq_info *info,
					       unsigned cpu)
{
	evtchn_ops->bind_to_cpu(info, cpu);
}

static inline void clear_evtchn(unsigned port)
{
	evtchn_ops->clear_pending(port);
}

static inline void set_evtchn(unsigned port)
{
	evtchn_ops->set_pending(port);
}

static inline bool test_evtchn(unsigned port)
{
	return evtchn_ops->is_pending(port);
}

static inline void mask_evtchn(unsigned port)
{
	return evtchn_ops->mask(port);
}

static inline void unmask_evtchn(unsigned port)
{
	return evtchn_ops->unmask(port);
}
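
/*
 * Example (illustrative sketch only): the accessors above are thin
 * wrappers around the active ABI, so a caller can mask a port,
 * acknowledge it and unmask it again without knowing which ABI is in
 * use.  The function name is hypothetical.
 *
 *	static void example_mask_ack(evtchn_port_t port)
 *	{
 *		mask_evtchn(port);
 *		clear_evtchn(port);
 *		unmask_evtchn(port);
 *	}
 */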

static inline void xen_evtchn_handle_events(unsigned cpu,
					    struct evtchn_loop_ctrl *ctrl)
{
	return evtchn_ops->handle_events(cpu, ctrl);
}

static inline void xen_evtchn_resume(void)
{
	if (evtchn_ops->resume)
		evtchn_ops->resume();
}

void xen_evtchn_2l_init(void);
int xen_evtchn_fifo_init(void);

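/*
 * Example (illustrative sketch only): the two init routines above select
 * the event channel ABI at boot.  A plausible pattern, assuming the FIFO
 * ABI is preferred and the 2-level ABI is the fallback (see events_base.c
 * for the real logic):
 *
 *	static void example_select_abi(void)
 *	{
 *		if (xen_evtchn_fifo_init())
 *			xen_evtchn_2l_init();
 *	}
 */
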
#endif /* #ifndef __EVENTS_INTERNAL_H__ */