// SPDX-License-Identifier: GPL-2.0
/*
 * Xen event channels (2-level ABI)
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>

#include "events_internal.h"

/*
 * Note: sizeof(xen_ulong_t) can be more than sizeof(unsigned long). Be
 * careful to only use bitops which allow for this (e.g.
 * test_bit/find_first_bit and friends but not __ffs) and to pass
 * BITS_PER_EVTCHN_WORD as the bitmask length.
 */
#define BITS_PER_EVTCHN_WORD (sizeof(xen_ulong_t)*8)
/*
 * Make a bitmask (i.e. unsigned long *) of a xen_ulong_t
 * array. Primarily to avoid long lines (hence the terse name).
 */
#define BM(x) (unsigned long *)(x)
/* Find the first set bit in an evtchn mask */
#define EVTCHN_FIRST_BIT(w) find_first_bit(BM(&(w)), BITS_PER_EVTCHN_WORD)

#define EVTCHN_MASK_SIZE (EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD)

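/*
 * Per-CPU bitmap of the event channels currently bound to each CPU;
 * used by active_evtchns() below so a CPU only sees events targeted
 * at it.
 */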
static DEFINE_PER_CPU(xen_ulong_t [EVTCHN_MASK_SIZE], cpu_evtchn_mask);

static unsigned evtchn_2l_max_channels(void)
{
	return EVTCHN_2L_NR_CHANNELS;
}

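/* Drop the channel from its CPU's bitmap when the channel is torn down. */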
static void evtchn_2l_remove(evtchn_port_t evtchn, unsigned int cpu)
{
	clear_bit(evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

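/* Rebind a channel: move its bit from the old CPU's bitmap to the new one's. */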
static void evtchn_2l_bind_to_cpu(struct irq_info *info, unsigned cpu)
{
	clear_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, info->cpu)));
	set_bit(info->evtchn, BM(per_cpu(cpu_evtchn_mask, cpu)));
}

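/*
 * The pending and mask bitmaps live in the shared info page, which the
 * hypervisor also updates, so they must be accessed with sync_ bitops.
 */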
static void evtchn_2l_clear_pending(unsigned port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_clear_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_set_pending(unsigned port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_pending[0]));
}

static bool evtchn_2l_is_pending(unsigned port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	return sync_test_bit(port, BM(&s->evtchn_pending[0]));
}

static void evtchn_2l_mask(unsigned port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	sync_set_bit(port, BM(&s->evtchn_mask[0]));
}

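/*
 * Unmask a port and, if an event was pending while it was masked,
 * re-trigger delivery: remotely via EVTCHNOP_unmask, locally by
 * setting the vcpu's selector and upcall-pending flags by hand.
 */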
static void evtchn_2l_unmask(unsigned port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	unsigned int cpu = get_cpu();
	int do_hypercall = 0, evtchn_pending = 0;

	BUG_ON(!irqs_disabled());

	smp_wmb();	/* All writes before unmask must be visible. */

	if (unlikely((cpu != cpu_from_evtchn(port))))
		do_hypercall = 1;
	else {
		/*
		 * Need to clear the mask before checking pending to
		 * avoid a race with an event becoming pending.
		 *
		 * EVTCHNOP_unmask will only trigger an upcall if the
		 * mask bit was set, so if a hypercall is needed
		 * remask the event.
		 */
		sync_clear_bit(port, BM(&s->evtchn_mask[0]));
		evtchn_pending = sync_test_bit(port, BM(&s->evtchn_pending[0]));

		if (unlikely(evtchn_pending && xen_hvm_domain())) {
			sync_set_bit(port, BM(&s->evtchn_mask[0]));
			do_hypercall = 1;
		}
	}

	/* Slow path (hypercall) if this is a non-local port or if this is
	 * an hvm domain and an event is pending (hvm domains don't have
	 * their own implementation of irq_enable). */
	if (do_hypercall) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	} else {
		struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

		/*
		 * The following is basically the equivalent of
		 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
		 * the interrupt edge' if the channel is masked.
		 */
		if (evtchn_pending &&
		    !sync_test_and_set_bit(port / BITS_PER_EVTCHN_WORD,
					   BM(&vcpu_info->evtchn_pending_sel)))
			vcpu_info->evtchn_upcall_pending = 1;
	}

	put_cpu();
}

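/*
 * Where the last scan left off. Each scan resumes from here so that a
 * busy low-numbered port cannot starve the higher-numbered ones.
 */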
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) (w & ((~((xen_ulong_t)0UL)) << i))

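/*
 * Events in word idx that are deliverable on this CPU: pending in the
 * shared bitmap, not globally masked, and bound to this CPU.
 */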
static inline xen_ulong_t active_evtchns(unsigned int cpu,
					 struct shared_info *sh,
					 unsigned int idx)
{
	return sh->evtchn_pending[idx] &
		per_cpu(cpu_evtchn_mask, cpu)[idx] &
		~sh->evtchn_mask[idx];
}

/*
 * Search the CPU's pending events bitmasks. For each one found, map
 * the event number to an irq and dispatch it for handling.
 *
 * Xen uses a two-level bitmap to speed searching. The first level is
 * a bitset of words which contain pending event bits. The second
 * level is a bitset of pending events themselves.
 */
static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	xen_ulong_t pending_words;
	xen_ulong_t pending_bits;
	int start_word_idx, start_bit_idx;
	int word_idx, bit_idx;
	int i;
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

	/* Timer interrupt has highest priority. */
	irq = irq_from_virq(cpu, VIRQ_TIMER);
	if (irq != -1) {
		unsigned int evtchn = evtchn_from_irq(irq);
		word_idx = evtchn / BITS_PER_EVTCHN_WORD;
		bit_idx = evtchn % BITS_PER_EVTCHN_WORD;
		if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
			generic_handle_irq(irq);
	}

	/*
	 * Master flag must be cleared /before/ clearing
	 * selector flag. xchg_xen_ulong must contain an
	 * appropriate barrier.
	 */
	pending_words = xchg_xen_ulong(&vcpu_info->evtchn_pending_sel, 0);

	start_word_idx = __this_cpu_read(current_word_idx);
	start_bit_idx = __this_cpu_read(current_bit_idx);

	word_idx = start_word_idx;

	for (i = 0; pending_words != 0; i++) {
		xen_ulong_t words;

		words = MASK_LSBS(pending_words, word_idx);

		/*
		 * If we masked out all events, wrap to beginning.
		 */
		if (words == 0) {
			word_idx = 0;
			bit_idx = 0;
			continue;
		}
		word_idx = EVTCHN_FIRST_BIT(words);

		pending_bits = active_evtchns(cpu, s, word_idx);
		bit_idx = 0; /* usually scan entire word from start */
		/*
		 * We scan the starting word in two parts.
		 *
		 * 1st time: start in the middle, scanning the
		 * upper bits.
		 *
		 * 2nd time: scan the whole word (not just the
		 * parts skipped in the first pass) -- if an
		 * event in the previously scanned bits is
		 * pending again it would just be scanned on
		 * the next loop anyway.
		 */
		if (word_idx == start_word_idx) {
			if (i == 0)
				bit_idx = start_bit_idx;
		}

		do {
			xen_ulong_t bits;
			int port;

			bits = MASK_LSBS(pending_bits, bit_idx);

			/* If we masked out all events, move on. */
			if (bits == 0)
				break;

			bit_idx = EVTCHN_FIRST_BIT(bits);

			/* Process port. */
			port = (word_idx * BITS_PER_EVTCHN_WORD) + bit_idx;
			handle_irq_for_port(port, ctrl);

			bit_idx = (bit_idx + 1) % BITS_PER_EVTCHN_WORD;

			/* Next caller starts at last processed + 1 */
			__this_cpu_write(current_word_idx,
					 bit_idx ? word_idx :
					 (word_idx+1) % BITS_PER_EVTCHN_WORD);
			__this_cpu_write(current_bit_idx, bit_idx);
		} while (bit_idx != 0);

		/* Scan start_l1i twice; all others once. */
		if ((word_idx != start_word_idx) || (i != 0))
			pending_words &= ~(1UL << word_idx);

		word_idx = (word_idx + 1) % BITS_PER_EVTCHN_WORD;
	}
}

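/*
 * Debug dump of event channel state: per-vcpu upcall flags, the
 * pending and mask bitmaps, and a decoded list of pending ports.
 * Typically wired up as the handler for Xen's debug virq.
 */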
irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
	struct shared_info *sh = HYPERVISOR_shared_info;
	int cpu = smp_processor_id();
	xen_ulong_t *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
	int i;
	unsigned long flags;
	static DEFINE_SPINLOCK(debug_lock);
	struct vcpu_info *v;

	spin_lock_irqsave(&debug_lock, flags);

	printk("\nvcpu %d\n ", cpu);

	for_each_online_cpu(i) {
		int pending;
		v = per_cpu(xen_vcpu, i);
		pending = (get_irq_regs() && i == cpu)
			? xen_irqs_disabled(get_irq_regs())
			: v->evtchn_upcall_mask;
		printk("%d: masked=%d pending=%d event_sel %0*"PRI_xen_ulong"\n ", i,
		       pending, v->evtchn_upcall_pending,
		       (int)(sizeof(v->evtchn_pending_sel)*2),
		       v->evtchn_pending_sel);
	}
	v = per_cpu(xen_vcpu, cpu);

	printk("\npending:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)sizeof(sh->evtchn_pending[0])*2,
		       sh->evtchn_pending[i],
		       i % 8 == 0 ? "\n " : " ");
	printk("\nglobal mask:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nglobally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocal cpu%d mask:\n ", cpu);
	for (i = EVTCHN_MASK_SIZE-1; i >= 0; i--)
		printk("%0*"PRI_xen_ulong"%s", (int)(sizeof(cpu_evtchn[0])*2),
		       cpu_evtchn[i],
		       i % 8 == 0 ? "\n " : " ");

	printk("\nlocally unmasked:\n ");
	for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
		xen_ulong_t pending = sh->evtchn_pending[i]
			& ~sh->evtchn_mask[i]
			& cpu_evtchn[i];
		printk("%0*"PRI_xen_ulong"%s",
		       (int)(sizeof(sh->evtchn_mask[0])*2),
		       pending, i % 8 == 0 ? "\n " : " ");
	}

	printk("\npending list:\n");
	for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
		if (sync_test_bit(i, BM(sh->evtchn_pending))) {
			int word_idx = i / BITS_PER_EVTCHN_WORD;
			printk(" %d: event %d -> irq %d%s%s%s\n",
			       cpu_from_evtchn(i), i,
			       get_evtchn_to_irq(i),
			       sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
			       ? "" : " l2-clear",
			       !sync_test_bit(i, BM(sh->evtchn_mask))
			       ? "" : " globally-masked",
			       sync_test_bit(i, BM(cpu_evtchn))
			       ? "" : " locally-masked");
		}
	}

	spin_unlock_irqrestore(&debug_lock, flags);

	return IRQ_HANDLED;
}

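/*
 * On resume all event channels are re-established from scratch, so
 * start with every per-CPU binding bitmap cleared.
 */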
static void evtchn_2l_resume(void)
{
	int i;

	for_each_online_cpu(i)
		memset(per_cpu(cpu_evtchn_mask, i), 0,
		       sizeof(xen_ulong_t) * EVTCHN_MASK_SIZE);
}

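/* A CPU going offline can no longer have channels bound to it. */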
static int evtchn_2l_percpu_deinit(unsigned int cpu)
{
	memset(per_cpu(cpu_evtchn_mask, cpu), 0,
	       sizeof(xen_ulong_t) * EVTCHN_MASK_SIZE);

	return 0;
}

static const struct evtchn_ops evtchn_ops_2l = {
	.max_channels = evtchn_2l_max_channels,
	.nr_channels = evtchn_2l_max_channels,
	.remove = evtchn_2l_remove,
	.bind_to_cpu = evtchn_2l_bind_to_cpu,
	.clear_pending = evtchn_2l_clear_pending,
	.set_pending = evtchn_2l_set_pending,
	.is_pending = evtchn_2l_is_pending,
	.mask = evtchn_2l_mask,
	.unmask = evtchn_2l_unmask,
	.handle_events = evtchn_2l_handle_events,
	.resume = evtchn_2l_resume,
	.percpu_deinit = evtchn_2l_percpu_deinit,
};

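/*
 * Install the 2-level ops. The events core prefers the FIFO-based
 * ABI where available and falls back to this one.
 */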
void __init xen_evtchn_2l_init(void)
{
	pr_info("Using 2-level ABI\n");
	evtchn_ops = &evtchn_ops_2l;
}