/*
 * Xen event channels (FIFO-based ABI)
 *
 * Copyright (C) 2013 Citrix Systems R&D ltd.
 *
 * This source code is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * Or, when distributed separately from the Linux kernel or
 * incorporated into other software packages, subject to the following
 * license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/cpu.h>

#include <asm/barrier.h>
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/page.h>

#include "events_internal.h"

#define EVENT_WORDS_PER_PAGE (XEN_PAGE_SIZE / sizeof(event_word_t))
#define MAX_EVENT_ARRAY_PAGES (EVTCHN_FIFO_NR_CHANNELS / EVENT_WORDS_PER_PAGE)

struct evtchn_fifo_queue {
	uint32_t head[EVTCHN_FIFO_MAX_QUEUES];
};

static DEFINE_PER_CPU(struct evtchn_fifo_control_block *, cpu_control_block);
static DEFINE_PER_CPU(struct evtchn_fifo_queue, cpu_queue);
static event_word_t *event_array[MAX_EVENT_ARRAY_PAGES] __read_mostly;
static unsigned event_array_pages __read_mostly;

/*
 * sync_set_bit() and friends must be unsigned long aligned.
 */
#if BITS_PER_LONG > 32

#define BM(w) (unsigned long *)((unsigned long)w & ~0x7UL)
#define EVTCHN_FIFO_BIT(b, w) \
	(((unsigned long)w & 0x4UL) ? (EVTCHN_FIFO_ ##b + 32) : EVTCHN_FIFO_ ##b)

#else

#define BM(w) ((unsigned long *)(w))
#define EVTCHN_FIFO_BIT(b, w) EVTCHN_FIFO_ ##b

#endif

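/* Return a pointer to the event word for @port within the event array. */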
static inline event_word_t *event_word_from_port(unsigned port)
{
	unsigned i = port / EVENT_WORDS_PER_PAGE;

	return event_array[i] + port % EVENT_WORDS_PER_PAGE;
}

static unsigned evtchn_fifo_max_channels(void)
{
	return EVTCHN_FIFO_NR_CHANNELS;
}

static unsigned evtchn_fifo_nr_channels(void)
{
	return event_array_pages * EVENT_WORDS_PER_PAGE;
}

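/*
 * Reset the per-CPU queue heads and register @control_block with the
 * hypervisor as this VCPU's FIFO control block.
 */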
static int init_control_block(int cpu,
			      struct evtchn_fifo_control_block *control_block)
{
	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
	struct evtchn_init_control init_control;
	unsigned int i;

	/* Reset the control block and the local HEADs. */
	clear_page(control_block);
	for (i = 0; i < EVTCHN_FIFO_MAX_QUEUES; i++)
		q->head[i] = 0;

	init_control.control_gfn = virt_to_gfn(control_block);
	init_control.offset = 0;
	init_control.vcpu = xen_vcpu_nr(cpu);

	return HYPERVISOR_event_channel_op(EVTCHNOP_init_control,
					   &init_control);
}

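/*
 * Free any allocated event array pages that are not currently
 * registered with the hypervisor (e.g. after a failed expand).
 */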
static void free_unused_array_pages(void)
{
	unsigned i;

	for (i = event_array_pages; i < MAX_EVENT_ARRAY_PAGES; i++) {
		if (!event_array[i])
			break;
		free_page((unsigned long)event_array[i]);
		event_array[i] = NULL;
	}
}

static void init_array_page(event_word_t *array_page)
{
	unsigned i;

	for (i = 0; i < EVENT_WORDS_PER_PAGE; i++)
		array_page[i] = 1 << EVTCHN_FIFO_MASKED;
}

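/*
 * Expand the event array until it covers info->evtchn, allocating
 * pages as needed and registering each one with the hypervisor via
 * EVTCHNOP_expand_array.
 */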
static int evtchn_fifo_setup(struct irq_info *info)
{
	unsigned port = info->evtchn;
	unsigned new_array_pages;
	int ret;

	new_array_pages = port / EVENT_WORDS_PER_PAGE + 1;

	if (new_array_pages > MAX_EVENT_ARRAY_PAGES)
		return -EINVAL;

	while (event_array_pages < new_array_pages) {
		void *array_page;
		struct evtchn_expand_array expand_array;

		/* Might already have a page if we've resumed. */
		array_page = event_array[event_array_pages];
		if (!array_page) {
			array_page = (void *)__get_free_page(GFP_KERNEL);
			if (array_page == NULL) {
				ret = -ENOMEM;
				goto error;
			}
			event_array[event_array_pages] = array_page;
		}

		/* Mask all events in this page before adding it. */
		init_array_page(array_page);

		expand_array.array_gfn = virt_to_gfn(array_page);

		ret = HYPERVISOR_event_channel_op(EVTCHNOP_expand_array,
						  &expand_array);
		if (ret < 0)
			goto error;

		event_array_pages++;
	}
	return 0;

 error:
	if (event_array_pages == 0)
		panic("xen: unable to expand event array with initial page (%d)\n", ret);
	else
		pr_err("unable to expand event array (%d)\n", ret);
	free_unused_array_pages();
	return ret;
}

static void evtchn_fifo_bind_to_cpu(struct irq_info *info, unsigned cpu)
{
	/* no-op */
}

static void evtchn_fifo_clear_pending(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	sync_clear_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static void evtchn_fifo_set_pending(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	sync_set_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static bool evtchn_fifo_is_pending(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	return sync_test_bit(EVTCHN_FIFO_BIT(PENDING, word), BM(word));
}

static void evtchn_fifo_mask(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	sync_set_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}

static bool evtchn_fifo_is_masked(unsigned port)
{
	event_word_t *word = event_word_from_port(port);
	return sync_test_bit(EVTCHN_FIFO_BIT(MASKED, word), BM(word));
}

/*
 * Clear MASKED if not PENDING, spinning if BUSY is set.
 * Return true if mask was cleared.
 */
static bool clear_masked_cond(volatile event_word_t *word)
{
	event_word_t new, old, w;

	w = *word;

	do {
		if (w & (1 << EVTCHN_FIFO_PENDING))
			return false;

		old = w & ~(1 << EVTCHN_FIFO_BUSY);
		new = old & ~(1 << EVTCHN_FIFO_MASKED);
		w = sync_cmpxchg(word, old, new);
	} while (w != old);

	return true;
}

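/*
 * Try to clear MASKED locally.  If the event is already PENDING the
 * unmask must be done by Xen (EVTCHNOP_unmask) so the event gets
 * linked onto a queue and delivered.
 */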
static void evtchn_fifo_unmask(unsigned port)
{
	event_word_t *word = event_word_from_port(port);

	BUG_ON(!irqs_disabled());

	if (!clear_masked_cond(word)) {
		struct evtchn_unmask unmask = { .port = port };
		(void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
	}
}

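/*
 * Atomically clear LINKED and the LINK field, returning the old link
 * value (the next port in the queue, or 0 if this was the tail).
 */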
static uint32_t clear_linked(volatile event_word_t *word)
{
	event_word_t new, old, w;

	w = *word;

	do {
		old = w;
		new = (w & ~((1 << EVTCHN_FIFO_LINKED)
			     | EVTCHN_FIFO_LINK_MASK));
	} while ((w = sync_cmpxchg(word, old, new)) != old);

	return w & EVTCHN_FIFO_LINK_MASK;
}

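/*
 * Take one event off the head of the given priority queue and handle
 * it if it is pending and unmasked.  The local queue head is updated
 * to the unlinked event's LINK value.
 */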
static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
			      struct evtchn_fifo_control_block *control_block,
			      unsigned priority, unsigned long *ready)
{
	struct evtchn_fifo_queue *q = &per_cpu(cpu_queue, cpu);
	uint32_t head;
	unsigned port;
	event_word_t *word;

	head = q->head[priority];

	/*
	 * Reached the tail last time?  Read the new HEAD from the
	 * control block.
	 */
	if (head == 0) {
		virt_rmb(); /* Ensure word is up-to-date before reading head. */
		head = control_block->head[priority];
	}

	port = head;
	word = event_word_from_port(port);
	head = clear_linked(word);

	/*
	 * If the link is non-zero, there are more events in the
	 * queue, otherwise the queue is empty.
	 *
	 * If the queue is empty, clear this priority from our local
	 * copy of the ready word.
	 */
	if (head == 0)
		clear_bit(priority, ready);

	if (evtchn_fifo_is_pending(port) && !evtchn_fifo_is_masked(port)) {
		if (unlikely(!ctrl))
			pr_warn("Dropping pending event for port %u\n", port);
		else
			handle_irq_for_port(port, ctrl);
	}

	q->head[priority] = head;
}

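/*
 * Consume events from every queue flagged in the control block's READY
 * word, re-reading READY after each event so newly readied priorities
 * are not missed.
 */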
static void __evtchn_fifo_handle_events(unsigned cpu,
					struct evtchn_loop_ctrl *ctrl)
{
	struct evtchn_fifo_control_block *control_block;
	unsigned long ready;
	unsigned q;

	control_block = per_cpu(cpu_control_block, cpu);

	ready = xchg(&control_block->ready, 0);

	while (ready) {
		q = find_first_bit(&ready, EVTCHN_FIFO_MAX_QUEUES);
		consume_one_event(cpu, ctrl, control_block, q, &ready);
		ready |= xchg(&control_block->ready, 0);
	}
}

static void evtchn_fifo_handle_events(unsigned cpu,
				      struct evtchn_loop_ctrl *ctrl)
{
	__evtchn_fifo_handle_events(cpu, ctrl);
}

static void evtchn_fifo_resume(void)
{
	unsigned cpu;

	for_each_possible_cpu(cpu) {
		void *control_block = per_cpu(cpu_control_block, cpu);
		int ret;

		if (!control_block)
			continue;

		/*
		 * If this CPU is offline, take the opportunity to
		 * free the control block while it is not being
		 * used.
		 */
		if (!cpu_online(cpu)) {
			free_page((unsigned long)control_block);
			per_cpu(cpu_control_block, cpu) = NULL;
			continue;
		}

		ret = init_control_block(cpu, control_block);
		BUG_ON(ret < 0);
	}

	/*
	 * The event array starts out as empty again and is extended
	 * as normal when events are bound.  The existing pages will
	 * be reused.
	 */
	event_array_pages = 0;
}

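/*
 * Allocate a page for @cpu's control block and register it with the
 * hypervisor.  On failure the page is freed and the per-CPU pointer is
 * left unset.
 */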
static int evtchn_fifo_alloc_control_block(unsigned cpu)
{
	void *control_block = NULL;
	int ret = -ENOMEM;

	control_block = (void *)__get_free_page(GFP_KERNEL);
	if (control_block == NULL)
		goto error;

	ret = init_control_block(cpu, control_block);
	if (ret < 0)
		goto error;

	per_cpu(cpu_control_block, cpu) = control_block;

	return 0;

 error:
	free_page((unsigned long)control_block);
	return ret;
}

static int evtchn_fifo_percpu_init(unsigned int cpu)
{
	if (!per_cpu(cpu_control_block, cpu))
		return evtchn_fifo_alloc_control_block(cpu);
	return 0;
}

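/*
 * Called when a CPU goes offline: drain any events still queued on it.
 * With a NULL loop ctrl, consume_one_event() warns about and drops
 * each pending event rather than handling it.
 */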
static int evtchn_fifo_percpu_deinit(unsigned int cpu)
{
	__evtchn_fifo_handle_events(cpu, NULL);
	return 0;
}

static const struct evtchn_ops evtchn_ops_fifo = {
	.max_channels  = evtchn_fifo_max_channels,
	.nr_channels   = evtchn_fifo_nr_channels,
	.setup         = evtchn_fifo_setup,
	.bind_to_cpu   = evtchn_fifo_bind_to_cpu,
	.clear_pending = evtchn_fifo_clear_pending,
	.set_pending   = evtchn_fifo_set_pending,
	.is_pending    = evtchn_fifo_is_pending,
	.mask          = evtchn_fifo_mask,
	.unmask        = evtchn_fifo_unmask,
	.handle_events = evtchn_fifo_handle_events,
	.resume        = evtchn_fifo_resume,
	.percpu_init   = evtchn_fifo_percpu_init,
	.percpu_deinit = evtchn_fifo_percpu_deinit,
};

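/*
 * Allocate and register the boot CPU's control block; on success,
 * select the FIFO-based ABI by installing evtchn_ops_fifo.
 */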
int __init xen_evtchn_fifo_init(void)
{
	int cpu = smp_processor_id();
	int ret;

	ret = evtchn_fifo_alloc_control_block(cpu);
	if (ret < 0)
		return ret;

	pr_info("Using FIFO-based ABI\n");

	evtchn_ops = &evtchn_ops_fifo;

	return ret;
}