/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/mm_types.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/memory.h>
#include <linux/device.h>
#include "kfd_priv.h"
#include "kfd_events.h"

/*
 * A task can only be on a single wait_queue at a time, but we need to support
 * waiting on multiple events (any/all).
 * Instead of each event simply having a wait_queue with sleeping tasks, it
 * has a linked list of waiter structures.
 * A thread that wants to sleep creates an array of these, one for each event,
 * and adds one to each event's waiter chain.
 */
struct kfd_event_waiter {
	struct list_head waiters;
	struct task_struct *sleeping_task;

	/* Transitions to true when the event this belongs to is signaled. */
	bool activated;

	/* Event this waiter is attached to. */
	struct kfd_event *event;

	/* Index into the caller's array of kfd_event_data. */
	uint32_t input_index;
};
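
/*
 * Usage sketch (a summary of this file, not separate documentation):
 * kfd_wait_on_events() below is the sole consumer of this structure. For N
 * user-supplied event IDs it allocates N waiters, links each onto the
 * corresponding event's waiter list, then sleeps until
 * test_event_condition() reports that any (or all, per the caller's flag)
 * of the waiters have been activated by set_event().
 */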

/*
 * Over-complicated pooled allocator for event notification slots.
 *
 * Each signal event needs a 64-bit signal slot where the signaler will write
 * a 1 before sending an interrupt. (This is needed because some interrupts
 * do not contain enough spare data bits to identify an event.)
 * We get whole pages from the kernel page allocator and map them to the
 * process VA.
 * Individual signal events are then allocated a slot in a page.
 */

struct signal_page {
	struct list_head event_pages; /* kfd_process.signal_event_pages */
	uint64_t *kernel_address;
	uint64_t __user *user_address;
	uint32_t page_index; /* Index into the mmap aperture. */
	unsigned int free_slots;
	unsigned long used_slot_bitmap[0];
};

#define SLOTS_PER_PAGE KFD_SIGNAL_EVENT_LIMIT
#define SLOT_BITMAP_SIZE BITS_TO_LONGS(SLOTS_PER_PAGE)
#define BITS_PER_PAGE (ilog2(SLOTS_PER_PAGE)+1)
#define SIGNAL_PAGE_SIZE (sizeof(struct signal_page) + \
				SLOT_BITMAP_SIZE * sizeof(long))
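
/*
 * Sizing sketch (illustrative; assumes, for the sake of the arithmetic,
 * that KFD_SIGNAL_EVENT_LIMIT is 256 and longs are 64-bit): then
 * SLOT_BITMAP_SIZE = BITS_TO_LONGS(256) = 4, so SIGNAL_PAGE_SIZE covers
 * the struct header plus 4 * 8 = 32 bytes of storage for the
 * used_slot_bitmap[] flexible array member.
 */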

/*
 * For signal events, the event ID is used as the interrupt user data.
 * For SQ s_sendmsg interrupts, this is limited to 8 bits.
 */

#define INTERRUPT_DATA_BITS 8
#define SIGNAL_EVENT_ID_SLOT_SHIFT 0
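
/*
 * Note (derived from the definitions above): with only
 * INTERRUPT_DATA_BITS == 8 bits of user data, an interrupt can carry at
 * most 256 distinct IDs, which is why signal events must also write their
 * slot before raising the interrupt (see the pooled-allocator comment
 * above). kfd_signal_event_interrupt() below uses the full-ID fast path
 * only when at least INTERRUPT_DATA_BITS valid bits were delivered.
 */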

static uint64_t *page_slots(struct signal_page *page)
{
	return page->kernel_address;
}

static bool allocate_free_slot(struct kfd_process *process,
				struct signal_page **out_page,
				unsigned int *out_slot_index)
{
	struct signal_page *page;

	list_for_each_entry(page, &process->signal_event_pages, event_pages) {
		if (page->free_slots > 0) {
			unsigned int slot =
				find_first_zero_bit(page->used_slot_bitmap,
							SLOTS_PER_PAGE);

			__set_bit(slot, page->used_slot_bitmap);
			page->free_slots--;

			page_slots(page)[slot] = UNSIGNALED_EVENT_SLOT;

			*out_page = page;
			*out_slot_index = slot;

			pr_debug("allocated event signal slot in page %p, slot %d\n",
					page, slot);

			return true;
		}
	}

	pr_debug("No free event signal slots were found for process %p\n",
			process);

	return false;
}

#define list_tail_entry(head, type, member) \
	list_entry((head)->prev, type, member)
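
/*
 * This local macro retrieves the last element of a list; it is, as far as
 * I can tell, equivalent to list_last_entry() in newer kernels.
 */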

static bool allocate_signal_page(struct file *devkfd, struct kfd_process *p)
{
	void *backing_store;
	struct signal_page *page;

	page = kzalloc(SIGNAL_PAGE_SIZE, GFP_KERNEL);
	if (!page)
		goto fail_alloc_signal_page;

	page->free_slots = SLOTS_PER_PAGE;

	backing_store = (void *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
					get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
	if (!backing_store)
		goto fail_alloc_signal_store;

	/* Prevent user-mode info leaks. */
	memset(backing_store, (uint8_t) UNSIGNALED_EVENT_SLOT,
		KFD_SIGNAL_EVENT_LIMIT * 8);

	page->kernel_address = backing_store;

	if (list_empty(&p->signal_event_pages))
		page->page_index = 0;
	else
		page->page_index = list_tail_entry(&p->signal_event_pages,
						   struct signal_page,
						   event_pages)->page_index + 1;

	pr_debug("allocated new event signal page at %p, for process %p\n",
			page, p);
	pr_debug("page index is %d\n", page->page_index);

	list_add(&page->event_pages, &p->signal_event_pages);

	return true;

fail_alloc_signal_store:
	kfree(page);
fail_alloc_signal_page:
	return false;
}

static bool allocate_event_notification_slot(struct file *devkfd,
					struct kfd_process *p,
					struct signal_page **page,
					unsigned int *signal_slot_index)
{
	bool ret;

	ret = allocate_free_slot(p, page, signal_slot_index);
	if (!ret) {
		ret = allocate_signal_page(devkfd, p);
		if (ret)
			ret = allocate_free_slot(p, page, signal_slot_index);
	}

	return ret;
}

/* Assumes that the process's event_mutex is locked. */
static void release_event_notification_slot(struct signal_page *page,
						size_t slot_index)
{
	__clear_bit(slot_index, page->used_slot_bitmap);
	page->free_slots++;

	/*
	 * We don't free signal pages, they are retained by the process
	 * and reused until it exits.
	 */
}

static struct signal_page *lookup_signal_page_by_index(struct kfd_process *p,
						unsigned int page_index)
{
	struct signal_page *page;

	/*
	 * This is safe because we don't delete signal pages until the
	 * process exits.
	 */
	list_for_each_entry(page, &p->signal_event_pages, event_pages)
		if (page->page_index == page_index)
			return page;

	return NULL;
}

/*
 * Assumes that p->event_mutex is held and of course that p is not going
 * away (current or locked).
 */
static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
{
	struct kfd_event *ev;

	hash_for_each_possible(p->events, ev, events, id)
		if (ev->event_id == id)
			return ev;

	return NULL;
}

static u32 make_signal_event_id(struct signal_page *page,
					unsigned int signal_slot_index)
{
	return page->page_index |
			(signal_slot_index << SIGNAL_EVENT_ID_SLOT_SHIFT);
}
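
/*
 * Worked example (illustrative): slot 5 in the first page (page_index 0)
 * yields 0 | (5 << 0) == 5. Because SLOTS_PER_PAGE equals the overall
 * signal event limit enforced in create_signal_event(), the first page is
 * never exhausted before the limit is hit, so in practice page_index is
 * always 0 and the event ID reduces to the slot index.
 */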

/*
 * Produce a kfd event ID for a nonsignal event.
 * These are arbitrary numbers, so we do a sequential search through
 * the hash table for an unused number.
 */
static u32 make_nonsignal_event_id(struct kfd_process *p)
{
	u32 id;

	for (id = p->next_nonsignal_event_id;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id) != NULL;
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {

		/*
		 * What if id == LAST_NONSIGNAL_EVENT_ID - 1?
		 * Then next_nonsignal_event_id = LAST_NONSIGNAL_EVENT_ID so
		 * the first loop fails immediately and we proceed with the
		 * wraparound loop below.
		 */
		p->next_nonsignal_event_id = id + 1;

		return id;
	}

	for (id = KFD_FIRST_NONSIGNAL_EVENT_ID;
		id < KFD_LAST_NONSIGNAL_EVENT_ID &&
		lookup_event_by_id(p, id) != NULL;
		id++)
		;

	if (id < KFD_LAST_NONSIGNAL_EVENT_ID) {
		p->next_nonsignal_event_id = id + 1;
		return id;
	}

	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	return 0;
}
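
/*
 * A return value of 0 is the "no free ID" sentinel; create_other_event()
 * below turns it into -ENOMEM. 0 itself is never handed out, assuming
 * KFD_FIRST_NONSIGNAL_EVENT_ID is nonzero, as its name and the sentinel
 * choice suggest.
 */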

static struct kfd_event *lookup_event_by_page_slot(struct kfd_process *p,
						struct signal_page *page,
						unsigned int signal_slot)
{
	return lookup_event_by_id(p, make_signal_event_id(page, signal_slot));
}

static int create_signal_event(struct file *devkfd,
				struct kfd_process *p,
				struct kfd_event *ev)
{
	if (p->signal_event_count == KFD_SIGNAL_EVENT_LIMIT) {
		pr_warn("amdkfd: Signal event wasn't created because limit was reached\n");
		return -ENOMEM;
	}

	if (!allocate_event_notification_slot(devkfd, p, &ev->signal_page,
						&ev->signal_slot_index)) {
		pr_warn("amdkfd: Signal event wasn't created because out of kernel memory\n");
		return -ENOMEM;
	}

	p->signal_event_count++;

	ev->user_signal_address =
			&ev->signal_page->user_address[ev->signal_slot_index];

	ev->event_id = make_signal_event_id(ev->signal_page,
						ev->signal_slot_index);

	pr_debug("signal event number %zu created with id %d, address %p\n",
			p->signal_event_count, ev->event_id,
			ev->user_signal_address);

	return 0;
}

/*
 * No non-signal events are supported yet.
 * We create them as events that never signal.
 * Set-event calls from user-mode fail on them.
 */
static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
{
	ev->event_id = make_nonsignal_event_id(p);
	if (ev->event_id == 0)
		return -ENOMEM;

	return 0;
}

void kfd_event_init_process(struct kfd_process *p)
{
	mutex_init(&p->event_mutex);
	hash_init(p->events);
	INIT_LIST_HEAD(&p->signal_event_pages);
	p->next_nonsignal_event_id = KFD_FIRST_NONSIGNAL_EVENT_ID;
	p->signal_event_count = 0;
}

static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
{
	if (ev->signal_page != NULL) {
		release_event_notification_slot(ev->signal_page,
						ev->signal_slot_index);
		p->signal_event_count--;
	}

	/*
	 * Abandon the list of waiters. Individual waiting threads will
	 * clean up their own data.
	 */
	list_del(&ev->waiters);

	hash_del(&ev->events);
	kfree(ev);
}

static void destroy_events(struct kfd_process *p)
{
	struct kfd_event *ev;
	struct hlist_node *tmp;
	unsigned int hash_bkt;

	hash_for_each_safe(p->events, hash_bkt, tmp, ev, events)
		destroy_event(p, ev);
}

/*
 * We assume that the process is being destroyed and there is no need to
 * unmap the pages or keep bookkeeping data in order.
 */
static void shutdown_signal_pages(struct kfd_process *p)
{
	struct signal_page *page, *tmp;

	list_for_each_entry_safe(page, tmp, &p->signal_event_pages,
					event_pages) {
		free_pages((unsigned long)page->kernel_address,
				get_order(KFD_SIGNAL_EVENT_LIMIT * 8));
		kfree(page);
	}
}

void kfd_event_free_process(struct kfd_process *p)
{
	destroy_events(p);
	shutdown_signal_pages(p);
}

static bool event_can_be_gpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL ||
					ev->type == KFD_EVENT_TYPE_DEBUG;
}

static bool event_can_be_cpu_signaled(const struct kfd_event *ev)
{
	return ev->type == KFD_EVENT_TYPE_SIGNAL;
}

int kfd_event_create(struct file *devkfd, struct kfd_process *p,
		uint32_t event_type, bool auto_reset, uint32_t node_id,
		uint32_t *event_id, uint32_t *event_trigger_data,
		uint64_t *event_page_offset, uint32_t *event_slot_index)
{
	int ret = 0;
	struct kfd_event *ev = kzalloc(sizeof(*ev), GFP_KERNEL);

	if (!ev)
		return -ENOMEM;

	ev->type = event_type;
	ev->auto_reset = auto_reset;
	ev->signaled = false;

	INIT_LIST_HEAD(&ev->waiters);

	*event_page_offset = 0;

	mutex_lock(&p->event_mutex);

	switch (event_type) {
	case KFD_EVENT_TYPE_SIGNAL:
	case KFD_EVENT_TYPE_DEBUG:
		ret = create_signal_event(devkfd, p, ev);
		if (!ret) {
			*event_page_offset = (ev->signal_page->page_index |
					KFD_MMAP_EVENTS_MASK);
			*event_page_offset <<= PAGE_SHIFT;
			*event_slot_index = ev->signal_slot_index;
		}
		break;
	default:
		ret = create_other_event(p, ev);
		break;
	}

	if (!ret) {
		hash_add(p->events, &ev->events, ev->event_id);

		*event_id = ev->event_id;
		*event_trigger_data = ev->event_id;
	} else {
		kfree(ev);
	}

	mutex_unlock(&p->event_mutex);

	return ret;
}

/* Assumes that p is current. */
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id)
{
	struct kfd_event *ev;
	int ret = 0;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev)
		destroy_event(p, ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void set_event(struct kfd_event *ev)
{
	struct kfd_event_waiter *waiter;
	struct kfd_event_waiter *next;

	/* Auto reset if the list is non-empty and we're waking someone. */
	ev->signaled = !ev->auto_reset || list_empty(&ev->waiters);

	list_for_each_entry_safe(waiter, next, &ev->waiters, waiters) {
		waiter->activated = true;

		/* _init because free_waiters will call list_del */
		list_del_init(&waiter->waiters);

		wake_up_process(waiter->sleeping_task);
	}
}

/* Assumes that p is current. */
int kfd_set_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		set_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void reset_event(struct kfd_event *ev)
{
	ev->signaled = false;
}

/* Assumes that p is current. */
int kfd_reset_event(struct kfd_process *p, uint32_t event_id)
{
	int ret = 0;
	struct kfd_event *ev;

	mutex_lock(&p->event_mutex);

	ev = lookup_event_by_id(p, event_id);

	if (ev && event_can_be_cpu_signaled(ev))
		reset_event(ev);
	else
		ret = -EINVAL;

	mutex_unlock(&p->event_mutex);
	return ret;
}

static void acknowledge_signal(struct kfd_process *p, struct kfd_event *ev)
{
	page_slots(ev->signal_page)[ev->signal_slot_index] =
							UNSIGNALED_EVENT_SLOT;
}

static bool is_slot_signaled(struct signal_page *page, unsigned int index)
{
	return page_slots(page)[index] != UNSIGNALED_EVENT_SLOT;
}

static void set_event_from_interrupt(struct kfd_process *p,
					struct kfd_event *ev)
{
	if (ev && event_can_be_gpu_signaled(ev)) {
		acknowledge_signal(p, ev);
		set_event(ev);
	}
}

void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
				uint32_t valid_id_bits)
{
	struct kfd_event *ev;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	if (valid_id_bits >= INTERRUPT_DATA_BITS) {
		/* Partial ID is a full ID. */
		ev = lookup_event_by_id(p, partial_id);
		set_event_from_interrupt(p, ev);
	} else {
		/*
		 * Partial ID is in fact partial. For now we completely
		 * ignore it, but we could use any bits we did receive to
		 * search faster.
		 */
		struct signal_page *page;
		unsigned int i;

		list_for_each_entry(page, &p->signal_event_pages, event_pages)
			for (i = 0; i < SLOTS_PER_PAGE; i++)
				if (is_slot_signaled(page, i)) {
					ev = lookup_event_by_page_slot(p,
								page, i);
					set_event_from_interrupt(p, ev);
				}
	}

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex); /* Release the process lock taken at lookup. */
}

static struct kfd_event_waiter *alloc_event_waiters(uint32_t num_events)
{
	struct kfd_event_waiter *event_waiters;
	uint32_t i;

	event_waiters = kmalloc_array(num_events,
					sizeof(struct kfd_event_waiter),
					GFP_KERNEL);

	if (event_waiters) {
		for (i = 0; i < num_events; i++) {
			INIT_LIST_HEAD(&event_waiters[i].waiters);
			event_waiters[i].sleeping_task = current;
			event_waiters[i].activated = false;
		}
	}

	return event_waiters;
}

static int init_event_waiter(struct kfd_process *p,
		struct kfd_event_waiter *waiter,
		uint32_t event_id,
		uint32_t input_index)
{
	struct kfd_event *ev = lookup_event_by_id(p, event_id);

	if (!ev)
		return -EINVAL;

	waiter->event = ev;
	waiter->input_index = input_index;
	waiter->activated = ev->signaled;
	ev->signaled = ev->signaled && !ev->auto_reset;

	list_add(&waiter->waiters, &ev->waiters);

	return 0;
}

static bool test_event_condition(bool all, uint32_t num_events,
				struct kfd_event_waiter *event_waiters)
{
	uint32_t i;
	uint32_t activated_count = 0;

	for (i = 0; i < num_events; i++) {
		if (event_waiters[i].activated) {
			if (!all)
				return true;

			activated_count++;
		}
	}

	return activated_count == num_events;
}
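
/*
 * Summary of the semantics above: for an "any" wait (all == false) a single
 * activated waiter is enough; for an "all" wait every one of the num_events
 * waiters must have been activated.
 */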

/*
 * Copy event-specific data, if defined.
 * Currently only memory exception events have additional data to copy
 * to user.
 */
static bool copy_signaled_event_data(uint32_t num_events,
		struct kfd_event_waiter *event_waiters,
		struct kfd_event_data __user *data)
{
	struct kfd_hsa_memory_exception_data *src;
	struct kfd_hsa_memory_exception_data __user *dst;
	struct kfd_event_waiter *waiter;
	struct kfd_event *event;
	uint32_t i;

	for (i = 0; i < num_events; i++) {
		waiter = &event_waiters[i];
		event = waiter->event;
		if (waiter->activated && event->type == KFD_EVENT_TYPE_MEMORY) {
			dst = &data[waiter->input_index].memory_exception_data;
			src = &event->memory_exception_data;
			if (copy_to_user(dst, src,
				sizeof(struct kfd_hsa_memory_exception_data)))
				return false;
		}
	}

	return true;
}

static long user_timeout_to_jiffies(uint32_t user_timeout_ms)
{
	if (user_timeout_ms == KFD_EVENT_TIMEOUT_IMMEDIATE)
		return 0;

	if (user_timeout_ms == KFD_EVENT_TIMEOUT_INFINITE)
		return MAX_SCHEDULE_TIMEOUT;

	/*
	 * msecs_to_jiffies interprets all values above 2^31-1 as infinite,
	 * but we consider them finite.
	 * This hack is wrong, but nobody is likely to notice.
	 */
	user_timeout_ms = min_t(uint32_t, user_timeout_ms, 0x7FFFFFFF);

	return msecs_to_jiffies(user_timeout_ms) + 1;
}
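
/*
 * Worked example (derived from the code above): user_timeout_ms == 1000
 * yields msecs_to_jiffies(1000) + 1 jiffies. The +1 compensates for the
 * partially-elapsed current jiffy, so the wait is not shorter than
 * requested.
 */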

static void free_waiters(uint32_t num_events, struct kfd_event_waiter *waiters)
{
	uint32_t i;

	for (i = 0; i < num_events; i++)
		list_del(&waiters[i].waiters);

	kfree(waiters);
}

int kfd_wait_on_events(struct kfd_process *p,
		uint32_t num_events, void __user *data,
		bool all, uint32_t user_timeout_ms,
		enum kfd_event_wait_result *wait_result)
{
	struct kfd_event_data __user *events =
			(struct kfd_event_data __user *) data;
	uint32_t i;
	int ret = 0;
	struct kfd_event_waiter *event_waiters = NULL;
	long timeout = user_timeout_to_jiffies(user_timeout_ms);

	mutex_lock(&p->event_mutex);

	event_waiters = alloc_event_waiters(num_events);
	if (!event_waiters) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < num_events; i++) {
		struct kfd_event_data event_data;

		if (copy_from_user(&event_data, &events[i],
				sizeof(struct kfd_event_data))) {
			ret = -EFAULT;
			goto fail;
		}

		ret = init_event_waiter(p, &event_waiters[i],
				event_data.event_id, i);
		if (ret)
			goto fail;
	}

	mutex_unlock(&p->event_mutex);

	while (true) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (signal_pending(current)) {
			/*
			 * This is wrong when a nonzero, non-infinite timeout
			 * is specified. We need to use
			 * ERESTARTSYS_RESTARTBLOCK, but struct restart_block
			 * contains a union with data for each user and it's
			 * in generic kernel code that I don't want to
			 * touch yet.
			 */
			ret = -ERESTARTSYS;
			break;
		}

		if (test_event_condition(all, num_events, event_waiters)) {
			if (copy_signaled_event_data(num_events,
					event_waiters, events))
				*wait_result = KFD_WAIT_COMPLETE;
			else
				*wait_result = KFD_WAIT_ERROR;
			break;
		}

		if (timeout <= 0) {
			*wait_result = KFD_WAIT_TIMEOUT;
			break;
		}

		timeout = schedule_timeout_interruptible(timeout);
	}
	__set_current_state(TASK_RUNNING);

	mutex_lock(&p->event_mutex);
	free_waiters(num_events, event_waiters);
	mutex_unlock(&p->event_mutex);

	return ret;

fail:
	if (event_waiters)
		free_waiters(num_events, event_waiters);

	mutex_unlock(&p->event_mutex);

	*wait_result = KFD_WAIT_ERROR;

	return ret;
}

int kfd_event_mmap(struct kfd_process *p, struct vm_area_struct *vma)
{
	unsigned int page_index;
	unsigned long pfn;
	struct signal_page *page;

	/* Check that the requested size is sane. */
	if (get_order(KFD_SIGNAL_EVENT_LIMIT * 8) !=
		get_order(vma->vm_end - vma->vm_start)) {
		pr_err("amdkfd: event page mmap requested illegal size\n");
		return -EINVAL;
	}

	page_index = vma->vm_pgoff;

	page = lookup_signal_page_by_index(p, page_index);
	if (!page) {
		/* Probably KFD bug, but mmap is user-accessible. */
		pr_debug("signal page could not be found for page_index %u\n",
				page_index);
		return -EINVAL;
	}

	pfn = __pa(page->kernel_address);
	pfn >>= PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_NORESERVE
		       | VM_DONTDUMP | VM_PFNMAP;

	pr_debug("mapping signal page\n");
	pr_debug("     start user address == 0x%08lx\n", vma->vm_start);
	pr_debug("     end user address   == 0x%08lx\n", vma->vm_end);
	pr_debug("     pfn                == 0x%016lX\n", pfn);
	pr_debug("     vm_flags           == 0x%08lX\n", vma->vm_flags);
	pr_debug("     size               == 0x%08lX\n",
			vma->vm_end - vma->vm_start);

	page->user_address = (uint64_t __user *)vma->vm_start;

	/* Map the page to the user process. */
	return remap_pfn_range(vma, vma->vm_start, pfn,
			vma->vm_end - vma->vm_start, vma->vm_page_prot);
}

/*
 * Assumes that p->event_mutex is held and of course
 * that p is not going away (current or locked).
 */
static void lookup_events_by_type_and_signal(struct kfd_process *p,
		int type, void *event_data)
{
	struct kfd_hsa_memory_exception_data *ev_data;
	struct kfd_event *ev;
	int bkt;
	bool send_signal = true;

	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;

	hash_for_each(p->events, bkt, ev, events)
		if (ev->type == type) {
			send_signal = false;
			dev_dbg(kfd_device,
					"Event found: id %X type %d",
					ev->event_id, ev->type);
			set_event(ev);
			if (ev->type == KFD_EVENT_TYPE_MEMORY && ev_data)
				ev->memory_exception_data = *ev_data;
		}

	/* Send SIGTERM if no event of type "type" has been found. */
	if (send_signal) {
		if (send_sigterm) {
			dev_warn(kfd_device,
				"Sending SIGTERM to HSA Process with PID %d ",
					p->lead_thread->pid);
			send_sig(SIGTERM, p->lead_thread, 0);
		} else {
			dev_err(kfd_device,
				"HSA Process (PID %d) got unhandled exception",
				p->lead_thread->pid);
		}
	}
}

void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
		unsigned long address, bool is_write_requested,
		bool is_execute_requested)
{
	struct kfd_hsa_memory_exception_data memory_exception_data;
	struct vm_area_struct *vma;

	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	memset(&memory_exception_data, 0, sizeof(memory_exception_data));

	down_read(&p->mm->mmap_sem);
	vma = find_vma(p->mm, address);

	memory_exception_data.gpu_id = dev->id;
	memory_exception_data.va = address;
	/* Set failure reason; default to a not-present fault. */
	memory_exception_data.failure.NotPresent = 1;
	memory_exception_data.failure.NoExecute = 0;
	memory_exception_data.failure.ReadOnly = 0;
	if (vma) {
		if (vma->vm_start > address) {
			memory_exception_data.failure.NotPresent = 1;
			memory_exception_data.failure.NoExecute = 0;
			memory_exception_data.failure.ReadOnly = 0;
		} else {
			memory_exception_data.failure.NotPresent = 0;
			if (is_write_requested && !(vma->vm_flags & VM_WRITE))
				memory_exception_data.failure.ReadOnly = 1;
			else
				memory_exception_data.failure.ReadOnly = 0;
			if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
				memory_exception_data.failure.NoExecute = 1;
			else
				memory_exception_data.failure.NoExecute = 0;
		}
	}

	up_read(&p->mm->mmap_sem);

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them. */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
			&memory_exception_data);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex); /* Release the process lock taken at lookup. */
}

void kfd_signal_hw_exception_event(unsigned int pasid)
{
	/*
	 * Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function returns a locked process.
	 */
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return; /* Presumably process exited. */

	mutex_lock(&p->event_mutex);

	/* Lookup events by type and signal them. */
	lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_HW_EXCEPTION, NULL);

	mutex_unlock(&p->event_mutex);
	mutex_unlock(&p->mutex); /* Release the process lock taken at lookup. */
}