Lines matching refs: ctx
83 struct userfaultfd_ctx *ctx; member
92 struct userfaultfd_ctx *ctx; member
104 static bool userfaultfd_is_initialized(struct userfaultfd_ctx *ctx) in userfaultfd_is_initialized() argument
106 return ctx->features & UFFD_FEATURE_INITIALIZED; in userfaultfd_is_initialized()
154 static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx) in userfaultfd_ctx_get() argument
156 refcount_inc(&ctx->refcount); in userfaultfd_ctx_get()
161 struct userfaultfd_ctx *ctx = container_of(head, struct userfaultfd_ctx, in __free_userfaultfd_ctx() local
163 kmem_cache_free(userfaultfd_ctx_cachep, ctx); in __free_userfaultfd_ctx()
174 static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx) in userfaultfd_ctx_put() argument
176 if (refcount_dec_and_test(&ctx->refcount)) { in userfaultfd_ctx_put()
177 VM_BUG_ON(spin_is_locked(&ctx->fault_pending_wqh.lock)); in userfaultfd_ctx_put()
178 VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh)); in userfaultfd_ctx_put()
179 VM_BUG_ON(spin_is_locked(&ctx->fault_wqh.lock)); in userfaultfd_ctx_put()
180 VM_BUG_ON(waitqueue_active(&ctx->fault_wqh)); in userfaultfd_ctx_put()
181 VM_BUG_ON(spin_is_locked(&ctx->event_wqh.lock)); in userfaultfd_ctx_put()
182 VM_BUG_ON(waitqueue_active(&ctx->event_wqh)); in userfaultfd_ctx_put()
183 VM_BUG_ON(spin_is_locked(&ctx->fd_wqh.lock)); in userfaultfd_ctx_put()
184 VM_BUG_ON(waitqueue_active(&ctx->fd_wqh)); in userfaultfd_ctx_put()
185 mmdrop(ctx->mm); in userfaultfd_ctx_put()
186 call_rcu(&ctx->rcu_head, __free_userfaultfd_ctx); in userfaultfd_ctx_put()
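The fragments above trace the context's reference-counting lifecycle. Below is a condensed sketch reconstructed only from those fragments (the struct layout shown is an assumption, not the full kernel definition) of how get/put pair with RCU-deferred freeing:

struct userfaultfd_ctx {
        refcount_t refcount;
        wait_queue_head_t fault_pending_wqh;
        wait_queue_head_t fault_wqh;
        wait_queue_head_t event_wqh;
        wait_queue_head_t fd_wqh;
        struct mm_struct *mm;
        struct rcu_head rcu_head;
        /* ... flags, features, released, mmap_changing, refile_seq ... */
};

static void userfaultfd_ctx_get(struct userfaultfd_ctx *ctx)
{
        refcount_inc(&ctx->refcount);
}

static void __free_userfaultfd_ctx(struct rcu_head *head)
{
        struct userfaultfd_ctx *ctx = container_of(head, struct userfaultfd_ctx,
                                                   rcu_head);

        /* userfaultfd_ctx_cachep is the file's slab cache for contexts */
        kmem_cache_free(userfaultfd_ctx_cachep, ctx);
}

static void userfaultfd_ctx_put(struct userfaultfd_ctx *ctx)
{
        if (refcount_dec_and_test(&ctx->refcount)) {
                /* every waitqueue must be idle before the ctx can go away */
                VM_BUG_ON(waitqueue_active(&ctx->fault_pending_wqh));
                VM_BUG_ON(waitqueue_active(&ctx->fault_wqh));
                VM_BUG_ON(waitqueue_active(&ctx->event_wqh));
                VM_BUG_ON(waitqueue_active(&ctx->fd_wqh));
                mmdrop(ctx->mm);        /* drop the mm_count pin taken at creation */
                /* free only after a grace period, so RCU readers of
                 * vma->vm_userfaultfd_ctx.ctx never see freed memory */
                call_rcu(&ctx->rcu_head, __free_userfaultfd_ctx);
        }
}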
234 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, in userfaultfd_huge_must_wait() argument
240 struct mm_struct *mm = ctx->mm; in userfaultfd_huge_must_wait()
266 static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx, in userfaultfd_huge_must_wait() argument
283 static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, in userfaultfd_must_wait() argument
288 struct mm_struct *mm = ctx->mm; in userfaultfd_must_wait()
363 struct userfaultfd_ctx *ctx; in userfaultfd_using_sigbus() local
371 ctx = rcu_dereference(vma->vm_userfaultfd_ctx.ctx); in userfaultfd_using_sigbus()
372 ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS); in userfaultfd_using_sigbus()
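The userfaultfd_using_sigbus() matches show the ctx being looked up from a VMA under RCU rather than under mmap_lock. A minimal sketch of that read-side pattern (the helper name here is hypothetical):

static bool uffd_vma_wants_sigbus(struct vm_area_struct *vma)
{
        struct userfaultfd_ctx *ctx;
        bool ret;

        rcu_read_lock();
        /* pairs with rcu_assign_pointer() on the register/unregister paths */
        ctx = rcu_dereference(vma->vm_userfaultfd_ctx.ctx);
        ret = ctx && (ctx->features & UFFD_FEATURE_SIGBUS);
        rcu_read_unlock();

        return ret;
}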
396 struct userfaultfd_ctx *ctx; in handle_userfault() local
422 ctx = rcu_dereference_protected(vmf->vma->vm_userfaultfd_ctx.ctx, in handle_userfault()
424 if (!ctx) in handle_userfault()
427 BUG_ON(ctx->mm != mm); in handle_userfault()
434 if (ctx->features & UFFD_FEATURE_SIGBUS) in handle_userfault()
437 ctx->flags & UFFD_USER_MODE_ONLY) { in handle_userfault()
449 if (unlikely(READ_ONCE(ctx->released))) { in handle_userfault()
507 userfaultfd_ctx_get(ctx); in handle_userfault()
512 ctx->features); in handle_userfault()
513 uwq.ctx = ctx; in handle_userfault()
518 spin_lock_irq(&ctx->fault_pending_wqh.lock); in handle_userfault()
523 __add_wait_queue(&ctx->fault_pending_wqh, &uwq.wq); in handle_userfault()
530 spin_unlock_irq(&ctx->fault_pending_wqh.lock); in handle_userfault()
533 must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, in handle_userfault()
536 must_wait = userfaultfd_huge_must_wait(ctx, vmf->vma, in handle_userfault()
541 if (likely(must_wait && !READ_ONCE(ctx->released))) { in handle_userfault()
542 wake_up_poll(&ctx->fd_wqh, EPOLLIN); in handle_userfault()
562 spin_lock_irq(&ctx->fault_pending_wqh.lock); in handle_userfault()
568 spin_unlock_irq(&ctx->fault_pending_wqh.lock); in handle_userfault()
575 userfaultfd_ctx_put(ctx); in handle_userfault()
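Taken together, the handle_userfault() matches outline the fault-side protocol: pin the ctx, queue a wait entry on fault_pending_wqh, re-check the fault, wake readers on fd_wqh, sleep, then dequeue and drop the reference. A heavily condensed skeleton under those assumptions (mmap_lock handling, hugetlb, signal and error paths omitted; uwq is assumed initialized by the caller, on-stack in the real code):

static vm_fault_t handle_userfault_sketch(struct vm_fault *vmf,
                                          struct userfaultfd_ctx *ctx,
                                          struct userfaultfd_wait_queue *uwq,
                                          unsigned long reason)
{
        bool must_wait;

        if (READ_ONCE(ctx->released))
                return VM_FAULT_SIGBUS;         /* the fd was already closed */
        if (ctx->features & UFFD_FEATURE_SIGBUS)
                return VM_FAULT_SIGBUS;         /* userspace asked for SIGBUS, not messages */

        userfaultfd_ctx_get(ctx);               /* keep ctx alive across the sleep */
        uwq->ctx = ctx;

        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        __add_wait_queue(&ctx->fault_pending_wqh, &uwq->wq);
        set_current_state(TASK_INTERRUPTIBLE);
        spin_unlock_irq(&ctx->fault_pending_wqh.lock);

        /* re-validate the fault now that we are visible to readers */
        must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags, reason);

        if (must_wait && !READ_ONCE(ctx->released)) {
                wake_up_poll(&ctx->fd_wqh, EPOLLIN);    /* notify poll()/read() */
                schedule();
        }
        __set_current_state(TASK_RUNNING);

        /* detach the wait entry before returning; the real code's locking
         * rationale around the two fault queues is more subtle than shown */
        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        if (!list_empty_careful(&uwq->wq.entry))
                list_del(&uwq->wq.entry);
        spin_unlock_irq(&ctx->fault_pending_wqh.lock);

        userfaultfd_ctx_put(ctx);
        return VM_FAULT_RETRY;
}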
581 static void userfaultfd_event_wait_completion(struct userfaultfd_ctx *ctx, in userfaultfd_event_wait_completion() argument
589 ewq->ctx = ctx; in userfaultfd_event_wait_completion()
593 spin_lock_irq(&ctx->event_wqh.lock); in userfaultfd_event_wait_completion()
598 __add_wait_queue(&ctx->event_wqh, &ewq->wq); in userfaultfd_event_wait_completion()
603 if (READ_ONCE(ctx->released) || in userfaultfd_event_wait_completion()
611 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); in userfaultfd_event_wait_completion()
623 spin_unlock_irq(&ctx->event_wqh.lock); in userfaultfd_event_wait_completion()
625 wake_up_poll(&ctx->fd_wqh, EPOLLIN); in userfaultfd_event_wait_completion()
628 spin_lock_irq(&ctx->event_wqh.lock); in userfaultfd_event_wait_completion()
631 spin_unlock_irq(&ctx->event_wqh.lock); in userfaultfd_event_wait_completion()
640 if (rcu_access_pointer(vma->vm_userfaultfd_ctx.ctx) == in userfaultfd_event_wait_completion()
642 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, in userfaultfd_event_wait_completion()
656 WRITE_ONCE(ctx->mmap_changing, false); in userfaultfd_event_wait_completion()
657 userfaultfd_ctx_put(ctx); in userfaultfd_event_wait_completion()
660 static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx, in userfaultfd_event_complete() argument
664 wake_up_locked(&ctx->event_wqh); in userfaultfd_event_complete()
665 __remove_wait_queue(&ctx->event_wqh, &ewq->wq); in userfaultfd_event_complete()
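userfaultfd_event_complete() is the reader-side half of the event handshake, called with event_wqh.lock held once the event message has been copied out. A minimal sketch consistent with the two fragments above (clearing ewq->msg.event as the completion marker is an assumption not visible in the matches):

static void userfaultfd_event_complete(struct userfaultfd_ctx *ctx,
                                       struct userfaultfd_wait_queue *ewq)
{
        /* caller holds ctx->event_wqh.lock */
        ewq->msg.event = 0;                     /* assumed completion marker */
        wake_up_locked(&ctx->event_wqh);        /* wake the thread blocked in
                                                 * userfaultfd_event_wait_completion() */
        __remove_wait_queue(&ctx->event_wqh, &ewq->wq);
}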
670 struct userfaultfd_ctx *ctx = NULL, *octx; in dup_userfaultfd() local
674 vma->vm_userfaultfd_ctx.ctx, in dup_userfaultfd()
679 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL); in dup_userfaultfd()
688 ctx = fctx->new; in dup_userfaultfd()
692 if (!ctx) { in dup_userfaultfd()
697 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); in dup_userfaultfd()
698 if (!ctx) { in dup_userfaultfd()
703 refcount_set(&ctx->refcount, 1); in dup_userfaultfd()
704 ctx->flags = octx->flags; in dup_userfaultfd()
705 ctx->features = octx->features; in dup_userfaultfd()
706 ctx->released = false; in dup_userfaultfd()
707 ctx->mmap_changing = false; in dup_userfaultfd()
708 ctx->mm = vma->vm_mm; in dup_userfaultfd()
709 mmgrab(ctx->mm); in dup_userfaultfd()
714 fctx->new = ctx; in dup_userfaultfd()
718 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx); in dup_userfaultfd()
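The dup_userfaultfd() matches show fork creating a fresh child ctx that inherits the parent's flags and features and pins the child's mm. A condensed sketch of that initialization (the helper name is hypothetical; the fctx list that defers the UFFD_EVENT_FORK notification is omitted):

static int dup_userfaultfd_sketch(struct vm_area_struct *vma,
                                  struct userfaultfd_ctx *octx)
{
        struct userfaultfd_ctx *ctx;

        ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        refcount_set(&ctx->refcount, 1);
        ctx->flags = octx->flags;               /* inherit open flags */
        ctx->features = octx->features;         /* inherit negotiated features */
        ctx->released = false;
        ctx->mmap_changing = false;
        ctx->mm = vma->vm_mm;                   /* the child's mm, not the parent's */
        mmgrab(ctx->mm);                        /* pin the child mm_struct */

        rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx);
        return 0;
}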
724 struct userfaultfd_ctx *ctx = fctx->orig; in dup_fctx() local
732 userfaultfd_event_wait_completion(ctx, &ewq); in dup_fctx()
749 struct userfaultfd_ctx *ctx; in mremap_userfaultfd_prep() local
751 ctx = rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx, in mremap_userfaultfd_prep()
754 if (!ctx) in mremap_userfaultfd_prep()
757 if (ctx->features & UFFD_FEATURE_EVENT_REMAP) { in mremap_userfaultfd_prep()
758 vm_ctx->ctx = ctx; in mremap_userfaultfd_prep()
759 userfaultfd_ctx_get(ctx); in mremap_userfaultfd_prep()
760 WRITE_ONCE(ctx->mmap_changing, true); in mremap_userfaultfd_prep()
763 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL); in mremap_userfaultfd_prep()
772 struct userfaultfd_ctx *ctx = vm_ctx->ctx; in mremap_userfaultfd_complete() local
775 if (!ctx) in mremap_userfaultfd_complete()
779 userfaultfd_ctx_put(ctx); in mremap_userfaultfd_complete()
790 userfaultfd_event_wait_completion(ctx, &ewq); in mremap_userfaultfd_complete()
797 struct userfaultfd_ctx *ctx; in userfaultfd_remove() local
800 ctx = rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx, in userfaultfd_remove()
802 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_REMOVE)) in userfaultfd_remove()
805 userfaultfd_ctx_get(ctx); in userfaultfd_remove()
806 WRITE_ONCE(ctx->mmap_changing, true); in userfaultfd_remove()
815 userfaultfd_event_wait_completion(ctx, &ewq); in userfaultfd_remove()
820 static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, in has_unmap_ctx() argument
826 if (unmap_ctx->ctx == ctx && unmap_ctx->start == start && in has_unmap_ctx()
839 struct userfaultfd_ctx *ctx = in userfaultfd_unmap_prep() local
840 rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx, in userfaultfd_unmap_prep()
843 if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || in userfaultfd_unmap_prep()
844 has_unmap_ctx(ctx, unmaps, start, end)) in userfaultfd_unmap_prep()
851 userfaultfd_ctx_get(ctx); in userfaultfd_unmap_prep()
852 WRITE_ONCE(ctx->mmap_changing, true); in userfaultfd_unmap_prep()
853 unmap_ctx->ctx = ctx; in userfaultfd_unmap_prep()
864 struct userfaultfd_unmap_ctx *ctx, *n; in userfaultfd_unmap_complete() local
867 list_for_each_entry_safe(ctx, n, uf, list) { in userfaultfd_unmap_complete()
871 ewq.msg.arg.remove.start = ctx->start; in userfaultfd_unmap_complete()
872 ewq.msg.arg.remove.end = ctx->end; in userfaultfd_unmap_complete()
874 userfaultfd_event_wait_completion(ctx->ctx, &ewq); in userfaultfd_unmap_complete()
876 list_del(&ctx->list); in userfaultfd_unmap_complete()
877 kfree(ctx); in userfaultfd_unmap_complete()
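mremap_userfaultfd_prep(), userfaultfd_remove() and userfaultfd_unmap_prep() above all share one notification pattern: take a ctx reference, set mmap_changing so racing UFFDIO_* calls back off with -EAGAIN, and later deliver the event through userfaultfd_event_wait_completion(), which clears mmap_changing and drops the reference. A schematic sketch of the two halves (the helper names are hypothetical condensations):

/* prep half: called while the address space is about to change */
static void uffd_event_prep(struct userfaultfd_ctx *ctx)
{
        userfaultfd_ctx_get(ctx);               /* reference travels with the event */
        WRITE_ONCE(ctx->mmap_changing, true);   /* UFFDIO_COPY & friends now return -EAGAIN */
}

/* completion half: runs later, after userfaultfd_event_wait_completion(ctx, &ewq)
 * has queued ewq on event_wqh, woken poll/read and slept until the event was
 * consumed by the reader */
static void uffd_event_finish(struct userfaultfd_ctx *ctx)
{
        WRITE_ONCE(ctx->mmap_changing, false);  /* address space is stable again */
        userfaultfd_ctx_put(ctx);               /* drop the prep-side reference */
}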
883 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_release() local
884 struct mm_struct *mm = ctx->mm; in userfaultfd_release()
890 WRITE_ONCE(ctx->released, true); in userfaultfd_release()
907 rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx, in userfaultfd_release()
912 if (cur_uffd_ctx != ctx) { in userfaultfd_release()
929 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL); in userfaultfd_release()
940 spin_lock_irq(&ctx->fault_pending_wqh.lock); in userfaultfd_release()
941 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range); in userfaultfd_release()
942 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range); in userfaultfd_release()
943 spin_unlock_irq(&ctx->fault_pending_wqh.lock); in userfaultfd_release()
946 wake_up_all(&ctx->event_wqh); in userfaultfd_release()
948 wake_up_poll(&ctx->fd_wqh, EPOLLHUP); in userfaultfd_release()
949 userfaultfd_ctx_put(ctx); in userfaultfd_release()
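The userfaultfd_release() matches give the teardown order when the file is closed. A condensed sketch (the per-VMA walk that strips the ctx from the mm is elided to a comment):

static void userfaultfd_release_sketch(struct userfaultfd_ctx *ctx)
{
        struct userfaultfd_wake_range range = { .start = 0, .len = ULONG_MAX };

        WRITE_ONCE(ctx->released, true);        /* new faults bail out instead of queueing */

        /* ... walk ctx->mm, clearing vma->vm_userfaultfd_ctx.ctx (and the
         * uffd VMA flags) on every VMA still pointing at this ctx ... */

        /* flush faults already queued so nobody sleeps on a dead fd */
        spin_lock_irq(&ctx->fault_pending_wqh.lock);
        __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, &range);
        __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, &range);
        spin_unlock_irq(&ctx->fault_pending_wqh.lock);

        wake_up_all(&ctx->event_wqh);           /* unblock event-delivery waiters */
        wake_up_poll(&ctx->fd_wqh, EPOLLHUP);   /* poll()ers see hangup */
        userfaultfd_ctx_put(ctx);               /* drop the reference held by the file */
}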
973 struct userfaultfd_ctx *ctx) in find_userfault() argument
975 return find_userfault_in(&ctx->fault_pending_wqh); in find_userfault()
979 struct userfaultfd_ctx *ctx) in find_userfault_evt() argument
981 return find_userfault_in(&ctx->event_wqh); in find_userfault_evt()
986 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_poll() local
989 poll_wait(file, &ctx->fd_wqh, wait); in userfaultfd_poll()
991 if (!userfaultfd_is_initialized(ctx)) in userfaultfd_poll()
1012 if (waitqueue_active(&ctx->fault_pending_wqh)) in userfaultfd_poll()
1014 else if (waitqueue_active(&ctx->event_wqh)) in userfaultfd_poll()
1038 static ssize_t userfaultfd_ctx_read(struct userfaultfd_ctx *ctx, int no_wait, in userfaultfd_ctx_read() argument
1055 spin_lock_irq(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
1056 __add_wait_queue(&ctx->fd_wqh, &wait); in userfaultfd_ctx_read()
1059 spin_lock(&ctx->fault_pending_wqh.lock); in userfaultfd_ctx_read()
1060 uwq = find_userfault(ctx); in userfaultfd_ctx_read()
1069 write_seqcount_begin(&ctx->refile_seq); in userfaultfd_ctx_read()
1093 add_wait_queue(&ctx->fault_wqh, &uwq->wq); in userfaultfd_ctx_read()
1095 write_seqcount_end(&ctx->refile_seq); in userfaultfd_ctx_read()
1099 spin_unlock(&ctx->fault_pending_wqh.lock); in userfaultfd_ctx_read()
1103 spin_unlock(&ctx->fault_pending_wqh.lock); in userfaultfd_ctx_read()
1105 spin_lock(&ctx->event_wqh.lock); in userfaultfd_ctx_read()
1106 uwq = find_userfault_evt(ctx); in userfaultfd_ctx_read()
1121 spin_unlock(&ctx->event_wqh.lock); in userfaultfd_ctx_read()
1126 userfaultfd_event_complete(ctx, uwq); in userfaultfd_ctx_read()
1127 spin_unlock(&ctx->event_wqh.lock); in userfaultfd_ctx_read()
1131 spin_unlock(&ctx->event_wqh.lock); in userfaultfd_ctx_read()
1141 spin_unlock_irq(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
1143 spin_lock_irq(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
1145 __remove_wait_queue(&ctx->fd_wqh, &wait); in userfaultfd_ctx_read()
1147 spin_unlock_irq(&ctx->fd_wqh.lock); in userfaultfd_ctx_read()
1151 spin_lock_irq(&ctx->event_wqh.lock); in userfaultfd_ctx_read()
1173 __add_wait_queue(&ctx->event_wqh, &uwq->wq); in userfaultfd_ctx_read()
1181 userfaultfd_event_complete(ctx, uwq); in userfaultfd_ctx_read()
1197 spin_unlock_irq(&ctx->event_wqh.lock); in userfaultfd_ctx_read()
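Inside userfaultfd_ctx_read(), the interesting step for the ctx is the refile: a pending fault is handed to the reader and moved from fault_pending_wqh to fault_wqh under a refile_seq write section, so that wake_userfault() (further below) cannot miss it mid-move. A sketch of just that step, wrapped in a hypothetical helper:

static bool uffd_read_one_fault(struct userfaultfd_ctx *ctx, struct uffd_msg *msg)
{
        struct userfaultfd_wait_queue *uwq;
        bool found = false;

        spin_lock(&ctx->fault_pending_wqh.lock);        /* fd_wqh.lock held by caller, irqs off */
        uwq = find_userfault(ctx);
        if (uwq) {
                write_seqcount_begin(&ctx->refile_seq);

                *msg = uwq->msg;                        /* hand the fault message to the reader */
                list_del(&uwq->wq.entry);               /* off fault_pending_wqh ... */
                add_wait_queue(&ctx->fault_wqh, &uwq->wq);      /* ... parked until a UFFDIO_* wake */

                write_seqcount_end(&ctx->refile_seq);
                found = true;
        }
        spin_unlock(&ctx->fault_pending_wqh.lock);

        return found;
}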
1206 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_read() local
1212 if (!userfaultfd_is_initialized(ctx)) in userfaultfd_read()
1218 _ret = userfaultfd_ctx_read(ctx, no_wait, &msg, inode); in userfaultfd_read()
1234 static void __wake_userfault(struct userfaultfd_ctx *ctx, in __wake_userfault() argument
1237 spin_lock_irq(&ctx->fault_pending_wqh.lock); in __wake_userfault()
1239 if (waitqueue_active(&ctx->fault_pending_wqh)) in __wake_userfault()
1240 __wake_up_locked_key(&ctx->fault_pending_wqh, TASK_NORMAL, in __wake_userfault()
1242 if (waitqueue_active(&ctx->fault_wqh)) in __wake_userfault()
1243 __wake_up(&ctx->fault_wqh, TASK_NORMAL, 1, range); in __wake_userfault()
1244 spin_unlock_irq(&ctx->fault_pending_wqh.lock); in __wake_userfault()
1247 static __always_inline void wake_userfault(struct userfaultfd_ctx *ctx, in wake_userfault() argument
1269 seq = read_seqcount_begin(&ctx->refile_seq); in wake_userfault()
1270 need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) || in wake_userfault()
1271 waitqueue_active(&ctx->fault_wqh); in wake_userfault()
1273 } while (read_seqcount_retry(&ctx->refile_seq, seq)); in wake_userfault()
1275 __wake_userfault(ctx, range); in wake_userfault()
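wake_userfault() is the consumer of that seqcount: it may skip the wakeup only if both fault queues were observed empty across a stable refile_seq reading. A sketch reconstructed from the matches above:

static void wake_userfault_sketch(struct userfaultfd_ctx *ctx,
                                  struct userfaultfd_wake_range *range)
{
        unsigned int seq;
        bool need_wakeup;

        do {
                seq = read_seqcount_begin(&ctx->refile_seq);
                need_wakeup = waitqueue_active(&ctx->fault_pending_wqh) ||
                              waitqueue_active(&ctx->fault_wqh);
                /* a retry means a reader refiled a waiter while we looked */
                cond_resched();
        } while (read_seqcount_retry(&ctx->refile_seq, seq));

        if (need_wakeup)
                __wake_userfault(ctx, range);
}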
1316 static int userfaultfd_register(struct userfaultfd_ctx *ctx, in userfaultfd_register() argument
1319 struct mm_struct *mm = ctx->mm; in userfaultfd_register()
1393 rcu_dereference_protected(cur->vm_userfaultfd_ctx.ctx, in userfaultfd_register()
1440 if (cur_uffd_ctx && cur_uffd_ctx != ctx) in userfaultfd_register()
1459 rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx, in userfaultfd_register()
1464 BUG_ON(cur_uffd_ctx && cur_uffd_ctx != ctx); in userfaultfd_register()
1471 if (cur_uffd_ctx == ctx && in userfaultfd_register()
1483 ((struct vm_userfaultfd_ctx){ ctx }), in userfaultfd_register()
1507 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, ctx); in userfaultfd_register()
1550 static int userfaultfd_unregister(struct userfaultfd_ctx *ctx, in userfaultfd_unregister() argument
1553 struct mm_struct *mm = ctx->mm; in userfaultfd_unregister()
1607 BUG_ON(!!rcu_access_pointer(cur->vm_userfaultfd_ctx.ctx) ^ in userfaultfd_unregister()
1630 rcu_dereference_protected(vma->vm_userfaultfd_ctx.ctx, in userfaultfd_unregister()
1690 rcu_assign_pointer(vma->vm_userfaultfd_ctx.ctx, NULL); in userfaultfd_unregister()
1709 static int userfaultfd_wake(struct userfaultfd_ctx *ctx, in userfaultfd_wake() argument
1721 ret = validate_range(ctx->mm, uffdio_wake.start, uffdio_wake.len); in userfaultfd_wake()
1734 wake_userfault(ctx, &range); in userfaultfd_wake()
1741 static int userfaultfd_copy(struct userfaultfd_ctx *ctx, in userfaultfd_copy() argument
1752 if (READ_ONCE(ctx->mmap_changing)) in userfaultfd_copy()
1761 ret = validate_range(ctx->mm, uffdio_copy.dst, uffdio_copy.len); in userfaultfd_copy()
1776 if (mmget_not_zero(ctx->mm)) { in userfaultfd_copy()
1777 ret = mcopy_atomic(ctx->mm, uffdio_copy.dst, uffdio_copy.src, in userfaultfd_copy()
1778 uffdio_copy.len, &ctx->mmap_changing, in userfaultfd_copy()
1780 mmput(ctx->mm); in userfaultfd_copy()
1793 wake_userfault(ctx, &range); in userfaultfd_copy()
1800 static int userfaultfd_zeropage(struct userfaultfd_ctx *ctx, in userfaultfd_zeropage() argument
1811 if (READ_ONCE(ctx->mmap_changing)) in userfaultfd_zeropage()
1820 ret = validate_range(ctx->mm, uffdio_zeropage.range.start, in userfaultfd_zeropage()
1829 if (mmget_not_zero(ctx->mm)) { in userfaultfd_zeropage()
1830 ret = mfill_zeropage(ctx->mm, uffdio_zeropage.range.start, in userfaultfd_zeropage()
1832 &ctx->mmap_changing, uffdio_zeropage.mode); in userfaultfd_zeropage()
1833 mmput(ctx->mm); in userfaultfd_zeropage()
1846 wake_userfault(ctx, &range); in userfaultfd_zeropage()
1853 static int userfaultfd_writeprotect(struct userfaultfd_ctx *ctx, in userfaultfd_writeprotect() argument
1862 if (READ_ONCE(ctx->mmap_changing)) in userfaultfd_writeprotect()
1871 ret = validate_range(ctx->mm, uffdio_wp.range.start, in userfaultfd_writeprotect()
1886 if (mmget_not_zero(ctx->mm)) { in userfaultfd_writeprotect()
1887 ret = mwriteprotect_range(ctx->mm, uffdio_wp.range.start, in userfaultfd_writeprotect()
1889 &ctx->mmap_changing); in userfaultfd_writeprotect()
1890 mmput(ctx->mm); in userfaultfd_writeprotect()
1901 wake_userfault(ctx, &range); in userfaultfd_writeprotect()
1906 static int userfaultfd_continue(struct userfaultfd_ctx *ctx, unsigned long arg) in userfaultfd_continue() argument
1916 if (READ_ONCE(ctx->mmap_changing)) in userfaultfd_continue()
1925 ret = validate_range(ctx->mm, uffdio_continue.range.start, in userfaultfd_continue()
1939 if (mmget_not_zero(ctx->mm)) { in userfaultfd_continue()
1940 ret = mcopy_continue(ctx->mm, uffdio_continue.range.start, in userfaultfd_continue()
1942 &ctx->mmap_changing); in userfaultfd_continue()
1943 mmput(ctx->mm); in userfaultfd_continue()
1958 wake_userfault(ctx, &range); in userfaultfd_continue()
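The UFFDIO_WAKE/COPY/ZEROPAGE/WRITEPROTECT/CONTINUE handlers above all share one shape: reject if mmap_changing is set, validate the range against ctx->mm, pin the mm with mmget_not_zero(), do the work, then wake the faulting range. A sketch of that shape using the copy fragments as the example (copy_from_user of the uapi struct and the exact return-value plumbing are simplified):

static int userfaultfd_copy_sketch(struct userfaultfd_ctx *ctx,
                                   struct uffdio_copy *uffdio_copy)
{
        struct userfaultfd_wake_range range;
        __s64 ret;

        if (READ_ONCE(ctx->mmap_changing))
                return -EAGAIN;         /* the layout is changing under us, retry later */

        ret = validate_range(ctx->mm, uffdio_copy->dst, uffdio_copy->len);
        if (ret)
                return ret;

        if (mmget_not_zero(ctx->mm)) {  /* the mm may already be exiting */
                ret = mcopy_atomic(ctx->mm, uffdio_copy->dst, uffdio_copy->src,
                                   uffdio_copy->len, &ctx->mmap_changing,
                                   uffdio_copy->mode);
                mmput(ctx->mm);
        } else {
                return -ESRCH;
        }
        if (ret < 0)
                return ret;

        /* wake any faults waiting on the range that was just filled */
        if (!(uffdio_copy->mode & UFFDIO_COPY_MODE_DONTWAKE)) {
                range.start = uffdio_copy->dst;
                range.len = ret;
                wake_userfault(ctx, &range);
        }
        return 0;
}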
1980 static int userfaultfd_api(struct userfaultfd_ctx *ctx, in userfaultfd_api() argument
2013 if (cmpxchg(&ctx->features, 0, ctx_features) != 0) in userfaultfd_api()
2030 struct userfaultfd_ctx *ctx = file->private_data; in userfaultfd_ioctl() local
2032 if (cmd != UFFDIO_API && !userfaultfd_is_initialized(ctx)) in userfaultfd_ioctl()
2037 ret = userfaultfd_api(ctx, arg); in userfaultfd_ioctl()
2040 ret = userfaultfd_register(ctx, arg); in userfaultfd_ioctl()
2043 ret = userfaultfd_unregister(ctx, arg); in userfaultfd_ioctl()
2046 ret = userfaultfd_wake(ctx, arg); in userfaultfd_ioctl()
2049 ret = userfaultfd_copy(ctx, arg); in userfaultfd_ioctl()
2052 ret = userfaultfd_zeropage(ctx, arg); in userfaultfd_ioctl()
2055 ret = userfaultfd_writeprotect(ctx, arg); in userfaultfd_ioctl()
2058 ret = userfaultfd_continue(ctx, arg); in userfaultfd_ioctl()
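userfaultfd_ioctl() dispatches the commands above, refusing everything but UFFDIO_API until the feature handshake has initialized the ctx. From userspace the same surface looks like the following minimal program against the stock uAPI headers (a hedged usage example; depending on the kernel, unprivileged use may additionally need UFFD_USER_MODE_ONLY or the vm.unprivileged_userfaultfd sysctl):

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        long uffd = syscall(SYS_userfaultfd, O_CLOEXEC | O_NONBLOCK);
        if (uffd < 0) { perror("userfaultfd"); return 1; }

        /* UFFDIO_API must succeed before any other ioctl (see the
         * userfaultfd_is_initialized() checks in the matches above) */
        struct uffdio_api api = { .api = UFFD_API, .features = 0 };
        if (ioctl(uffd, UFFDIO_API, &api)) { perror("UFFDIO_API"); return 1; }

        size_t len = 4 * (size_t)sysconf(_SC_PAGESIZE);
        void *area = mmap(NULL, len, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED) { perror("mmap"); return 1; }

        /* register the region for missing-page events */
        struct uffdio_register reg = {
                .range = { .start = (unsigned long)area, .len = len },
                .mode  = UFFDIO_REGISTER_MODE_MISSING,
        };
        if (ioctl(uffd, UFFDIO_REGISTER, &reg)) { perror("UFFDIO_REGISTER"); return 1; }

        printf("registered %zu bytes at %p, available ioctls 0x%llx\n",
               len, area, (unsigned long long)reg.ioctls);
        close(uffd);
        return 0;
}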
2067 struct userfaultfd_ctx *ctx = f->private_data; in userfaultfd_show_fdinfo() local
2071 spin_lock_irq(&ctx->fault_pending_wqh.lock); in userfaultfd_show_fdinfo()
2072 list_for_each_entry(wq, &ctx->fault_pending_wqh.head, entry) { in userfaultfd_show_fdinfo()
2076 list_for_each_entry(wq, &ctx->fault_wqh.head, entry) { in userfaultfd_show_fdinfo()
2079 spin_unlock_irq(&ctx->fault_pending_wqh.lock); in userfaultfd_show_fdinfo()
2087 pending, total, UFFD_API, ctx->features, in userfaultfd_show_fdinfo()
2106 struct userfaultfd_ctx *ctx = (struct userfaultfd_ctx *) mem; in init_once_userfaultfd_ctx() local
2108 init_waitqueue_head(&ctx->fault_pending_wqh); in init_once_userfaultfd_ctx()
2109 init_waitqueue_head(&ctx->fault_wqh); in init_once_userfaultfd_ctx()
2110 init_waitqueue_head(&ctx->event_wqh); in init_once_userfaultfd_ctx()
2111 init_waitqueue_head(&ctx->fd_wqh); in init_once_userfaultfd_ctx()
2112 seqcount_spinlock_init(&ctx->refile_seq, &ctx->fault_pending_wqh.lock); in init_once_userfaultfd_ctx()
2117 struct userfaultfd_ctx *ctx; in SYSCALL_DEFINE1() local
2139 ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL); in SYSCALL_DEFINE1()
2140 if (!ctx) in SYSCALL_DEFINE1()
2143 refcount_set(&ctx->refcount, 1); in SYSCALL_DEFINE1()
2144 ctx->flags = flags; in SYSCALL_DEFINE1()
2145 ctx->features = 0; in SYSCALL_DEFINE1()
2146 ctx->released = false; in SYSCALL_DEFINE1()
2147 ctx->mmap_changing = false; in SYSCALL_DEFINE1()
2148 ctx->mm = current->mm; in SYSCALL_DEFINE1()
2150 mmgrab(ctx->mm); in SYSCALL_DEFINE1()
2152 fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx, in SYSCALL_DEFINE1()
2155 mmdrop(ctx->mm); in SYSCALL_DEFINE1()
2156 kmem_cache_free(userfaultfd_ctx_cachep, ctx); in SYSCALL_DEFINE1()
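Finally, the SYSCALL_DEFINE1(userfaultfd, ...) matches show where a ctx is born: the waitqueues and refile_seq were already set up by init_once_userfaultfd_ctx() when the slab object was first constructed, so the syscall only initializes per-instance state, pins the caller's mm and hands the ctx to an anonymous inode. A condensed sketch (the function name, exact open flags and the last anon_inode_getfd_secure() argument are assumptions):

static int new_userfaultfd_sketch(int flags)
{
        struct userfaultfd_ctx *ctx;
        int fd;

        ctx = kmem_cache_alloc(userfaultfd_ctx_cachep, GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        refcount_set(&ctx->refcount, 1);
        ctx->flags = flags;             /* O_CLOEXEC / O_NONBLOCK / UFFD_USER_MODE_ONLY */
        ctx->features = 0;              /* negotiated later through UFFDIO_API */
        ctx->released = false;
        ctx->mmap_changing = false;
        ctx->mm = current->mm;
        mmgrab(ctx->mm);                /* mm_count pin, paired with mmdrop() in ctx_put */

        fd = anon_inode_getfd_secure("[userfaultfd]", &userfaultfd_fops, ctx,
                                     O_RDWR | (flags & UFFD_SHARED_FCNTL_FLAGS),
                                     NULL);
        if (fd < 0) {
                mmdrop(ctx->mm);
                kmem_cache_free(userfaultfd_ctx_cachep, ctx);
        }
        return fd;
}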