// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 MediaTek Inc.
 */

#include <linux/eventfd.h>
#include <linux/syscalls.h>
#include <linux/gzvm_drv.h>
#include "gzvm_common.h"

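/**
 * struct gzvm_irq_ack_notifier: gzvm irq ack notifier.
 * @link: Hook into gzvm->irq_ack_notifier_list.
 * @gsi: GSI this notifier is interested in.
 * @irq_acked: Callback invoked when @gsi is acknowledged.
 */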
struct gzvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned int gsi;
	void (*irq_acked)(struct gzvm_irq_ack_notifier *ian);
};

/**
 * struct gzvm_kernel_irqfd: gzvm kernel irqfd descriptor.
 * @gzvm: Pointer to struct gzvm.
 * @wait: Wait queue entry.
 * @gsi: Used for level IRQ fast-path.
 * @eventfd: Used for setup/shutdown.
 * @list: Entry in gzvm->irqfds.items.
 * @pt: Poll table used to register @wait on the eventfd's wait queue.
 * @shutdown: Work item that runs irqfd_shutdown() on irqfd_cleanup_wq.
 */
struct gzvm_kernel_irqfd {
	struct gzvm *gzvm;
	wait_queue_entry_t wait;

	int gsi;

	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

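/* Host-wide workqueue that runs deferred irqfd shutdown work for all VMs. */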
static struct workqueue_struct *irqfd_cleanup_wq;

/**
 * irqfd_set_irq(): Inject a virtual interrupt on behalf of an irqfd.
 * @gzvm: Pointer to gzvm.
 * @irq: SPI interrupt number (starts from 0 instead of 32).
 * @level: IRQ trigger level.
 */
static void irqfd_set_irq(struct gzvm *gzvm, u32 irq, int level)
{
	if (level)
		gzvm_irqchip_inject_irq(gzvm, 0, irq, level);
}

/**
 * irqfd_shutdown() - Race-free decouple logic (ordering is critical).
 * @work: Pointer to work_struct.
 */
static void irqfd_shutdown(struct work_struct *work)
{
	struct gzvm_kernel_irqfd *irqfd =
		container_of(work, struct gzvm_kernel_irqfd, shutdown);
	struct gzvm *gzvm = irqfd->gzvm;
	u64 cnt;

	/* Make sure irqfd has been initialized in assign path. */
	synchronize_srcu(&gzvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * It is now safe to release the object's resources.
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/**
 * irqfd_is_active() - Assumes gzvm->irqfds.lock is held.
 * @irqfd: Pointer to gzvm_kernel_irqfd.
 *
 * Return:
 * * true - irqfd is active.
 */
static bool irqfd_is_active(struct gzvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/**
 * irqfd_deactivate() - Mark the irqfd as inactive and schedule it for removal.
 *		       Assumes gzvm->irqfds.lock is held.
 * @irqfd: Pointer to gzvm_kernel_irqfd.
 */
static void irqfd_deactivate(struct gzvm_kernel_irqfd *irqfd)
{
	if (!irqfd_is_active(irqfd))
		return;

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/**
 * irqfd_wakeup() - Wait queue callback; runs when the irqfd's eventfd is
 *		    written to, and performs the virtual interrupt injection.
 * @wait: Pointer to wait_queue_entry_t.
 * @mode: Unused.
 * @sync: Unused.
 * @key: Carries the epoll event flags.
 *
 * Return:
 * * 0 - Success
 */
static int irqfd_wakeup(wait_queue_entry_t *wait, unsigned int mode, int sync,
			void *key)
{
	struct gzvm_kernel_irqfd *irqfd =
		container_of(wait, struct gzvm_kernel_irqfd, wait);
	__poll_t flags = key_to_poll(key);
	struct gzvm *gzvm = irqfd->gzvm;

	if (flags & EPOLLIN) {
		u64 cnt;

		eventfd_ctx_do_read(irqfd->eventfd, &cnt);
		/* gzvm's irq injection does not block, no workqueue needed */
		irqfd_set_irq(gzvm, irqfd->gsi, 1);
	}

	if (flags & EPOLLHUP) {
		/* The eventfd is closing, detach from GZVM */
		unsigned long iflags;

		spin_lock_irqsave(&gzvm->irqfds.lock, iflags);

		/*
		 * Re-check in case someone deactivated the irqfd before
		 * we could acquire the irqfds.lock.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&gzvm->irqfds.lock, iflags);
	}

	return 0;
}

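/*
 * Poll callback installed via init_poll_funcptr(); vfs_poll() on the eventfd
 * invokes it to register our wait entry on the eventfd's wait queue, added
 * with priority so that irqfd_wakeup() runs ahead of ordinary waiters.
 */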
static void irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
				    poll_table *pt)
{
	struct gzvm_kernel_irqfd *irqfd =
		container_of(pt, struct gzvm_kernel_irqfd, pt);
	add_wait_queue_priority(wqh, &irqfd->wait);
}

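/**
 * gzvm_irqfd_assign() - Bind an eventfd to a GSI so that signaling the
 *			 eventfd injects the interrupt into the guest.
 * @gzvm: Pointer to gzvm.
 * @args: Pointer to gzvm_irqfd.
 *
 * Return:
 * * 0 - Success.
 * * Negative value - Failure.
 */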
static int gzvm_irqfd_assign(struct gzvm *gzvm, struct gzvm_irqfd *args)
{
	struct gzvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL;
	int ret;
	int idx;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL_ACCOUNT);
	if (!irqfd)
		return -ENOMEM;

	irqfd->gzvm = gzvm;
	irqfd->gsi = args->gsi;

	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&gzvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &gzvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		pr_err("already used: gsi=%d fd=%d\n", args->gsi, args->fd);
		ret = -EBUSY;
		spin_unlock_irq(&gzvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&gzvm->irq_srcu);

	list_add_tail(&irqfd->list, &gzvm->irqfds.items);

	spin_unlock_irq(&gzvm->irqfds.lock);

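	/*
	 * Hook irqfd->wait onto the eventfd's wait queue through
	 * irqfd_ptable_queue_proc(); from this point on, irqfd_wakeup()
	 * fires whenever the eventfd is signaled.
	 */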
	vfs_poll(f.file, &irqfd->pt);

	srcu_read_unlock(&gzvm->irq_srcu, idx);

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the EPOLLHUP.
	 */
	fdput(f);
	return 0;

fail:
	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

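/*
 * Walk the ack notifier list under SRCU and invoke the callback of every
 * notifier registered for @gsi.
 */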
static void gzvm_notify_acked_gsi(struct gzvm *gzvm, int gsi)
{
	struct gzvm_irq_ack_notifier *gian;

	hlist_for_each_entry_srcu(gian, &gzvm->irq_ack_notifier_list,
				  link, srcu_read_lock_held(&gzvm->irq_srcu))
		if (gian->gsi == gsi)
			gian->irq_acked(gian);
}

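/**
 * gzvm_notify_acked_irq() - Notify all registered ack notifiers for @gsi,
 *			     taking the SRCU read lock around the walk.
 * @gzvm: Pointer to gzvm.
 * @gsi: The gsi that was acknowledged.
 */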
void gzvm_notify_acked_irq(struct gzvm *gzvm, unsigned int gsi)
{
	int idx;

	idx = srcu_read_lock(&gzvm->irq_srcu);
	gzvm_notify_acked_gsi(gzvm, gsi);
	srcu_read_unlock(&gzvm->irq_srcu, idx);
}

/**
 * gzvm_irqfd_deassign() - Shut down any irqfds that match fd+gsi.
 * @gzvm: Pointer to gzvm.
 * @args: Pointer to gzvm_irqfd.
 *
 * Return:
 * * 0 - Success.
 * * Negative value - Failure.
 */
static int gzvm_irqfd_deassign(struct gzvm *gzvm, struct gzvm_irqfd *args)
{
	struct gzvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&gzvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &gzvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi)
			irqfd_deactivate(irqfd);
	}

	spin_unlock_irq(&gzvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

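/**
 * gzvm_irqfd() - Validate the irqfd request and dispatch it to assign or
 *		  deassign. The pad bytes must be zero, and only the
 *		  DEASSIGN and RESAMPLE flags are accepted.
 * @gzvm: Pointer to gzvm.
 * @args: Pointer to gzvm_irqfd.
 *
 * Return:
 * * 0 - Success.
 * * Negative value - Failure.
 */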
int gzvm_irqfd(struct gzvm *gzvm, struct gzvm_irqfd *args)
{
	for (int i = 0; i < ARRAY_SIZE(args->pad); i++) {
		if (args->pad[i])
			return -EINVAL;
	}

	if (args->flags &
	    ~(GZVM_IRQFD_FLAG_DEASSIGN | GZVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & GZVM_IRQFD_FLAG_DEASSIGN)
		return gzvm_irqfd_deassign(gzvm, args);

	return gzvm_irqfd_assign(gzvm, args);
}
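
/*
 * Usage sketch from a userspace VMM, assuming gzvm_irqfd() is reached via a
 * GZVM_IRQFD ioctl on the VM fd (the ioctl name is an illustration, not
 * confirmed by this file):
 *
 *	struct gzvm_irqfd args = { 0 };
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *
 *	args.fd = efd;
 *	args.gsi = 4;				// SPI 4, i.e. GIC intid 36
 *	ioctl(vm_fd, GZVM_IRQFD, &args);	// assign
 *
 *	write(efd, &(uint64_t){ 1 }, sizeof(uint64_t));	// inject SPI 4
 */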

/**
 * gzvm_vm_irqfd_init() - Initialize irqfd data structure per VM
 *
 * @gzvm: Pointer to struct gzvm.
 *
 * Return:
 * * 0 - Success.
 * * Negative - Failure.
 */
int gzvm_vm_irqfd_init(struct gzvm *gzvm)
{
	mutex_init(&gzvm->irq_lock);

	spin_lock_init(&gzvm->irqfds.lock);
	INIT_LIST_HEAD(&gzvm->irqfds.items);
	if (init_srcu_struct(&gzvm->irq_srcu))
		return -EINVAL;
	INIT_HLIST_HEAD(&gzvm->irq_ack_notifier_list);

	return 0;
}

/**
 * gzvm_vm_irqfd_release() - Called as the gzvm VM fd is being released;
 *			     shuts down all irqfds that still remain open.
 * @gzvm: Pointer to gzvm.
 */
void gzvm_vm_irqfd_release(struct gzvm *gzvm)
{
	struct gzvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&gzvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &gzvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&gzvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/**
 * gzvm_drv_irqfd_init() - Create the driver-wide irqfd cleanup workqueue.
 *
 * Return:
 * * 0 - Success.
 * * Negative - Failure.
 *
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int gzvm_drv_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("gzvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

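/* Tear down the cleanup workqueue when the driver exits. */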
void gzvm_drv_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}