Lines matching references to 'ctx' in fs/aio.c
261 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) in aio_private_file() argument
269 inode->i_mapping->private_data = ctx; in aio_private_file()
309 static void put_aio_ring_file(struct kioctx *ctx) in put_aio_ring_file() argument
311 struct file *aio_ring_file = ctx->aio_ring_file; in put_aio_ring_file()
321 ctx->aio_ring_file = NULL; in put_aio_ring_file()
328 static void aio_free_ring(struct kioctx *ctx) in aio_free_ring() argument
335 put_aio_ring_file(ctx); in aio_free_ring()
337 for (i = 0; i < ctx->nr_pages; i++) { in aio_free_ring()
340 page_count(ctx->ring_pages[i])); in aio_free_ring()
341 page = ctx->ring_pages[i]; in aio_free_ring()
344 ctx->ring_pages[i] = NULL; in aio_free_ring()
348 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { in aio_free_ring()
349 kfree(ctx->ring_pages); in aio_free_ring()
350 ctx->ring_pages = NULL; in aio_free_ring()
368 struct kioctx *ctx; in aio_ring_mremap() local
370 ctx = rcu_dereference(table->table[i]); in aio_ring_mremap()
371 if (ctx && ctx->aio_ring_file == file) { in aio_ring_mremap()
372 if (!atomic_read(&ctx->dead)) { in aio_ring_mremap()
373 ctx->user_id = ctx->mmap_base = vma->vm_start; in aio_ring_mremap()
410 struct kioctx *ctx; in aio_migrate_folio() local
427 ctx = mapping->private_data; in aio_migrate_folio()
428 if (!ctx) { in aio_migrate_folio()
437 if (!mutex_trylock(&ctx->ring_lock)) { in aio_migrate_folio()
443 if (idx < (pgoff_t)ctx->nr_pages) { in aio_migrate_folio()
445 if (ctx->ring_pages[idx] != &src->page) in aio_migrate_folio()
467 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_migrate_folio()
469 BUG_ON(ctx->ring_pages[idx] != &src->page); in aio_migrate_folio()
470 ctx->ring_pages[idx] = &dst->page; in aio_migrate_folio()
471 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_migrate_folio()
477 mutex_unlock(&ctx->ring_lock); in aio_migrate_folio()
491 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) in aio_setup_ring() argument
510 file = aio_private_file(ctx, nr_pages); in aio_setup_ring()
512 ctx->aio_ring_file = NULL; in aio_setup_ring()
516 ctx->aio_ring_file = file; in aio_setup_ring()
520 ctx->ring_pages = ctx->internal_pages; in aio_setup_ring()
522 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), in aio_setup_ring()
524 if (!ctx->ring_pages) { in aio_setup_ring()
525 put_aio_ring_file(ctx); in aio_setup_ring()
541 ctx->ring_pages[i] = page; in aio_setup_ring()
543 ctx->nr_pages = i; in aio_setup_ring()
546 aio_free_ring(ctx); in aio_setup_ring()
550 ctx->mmap_size = nr_pages * PAGE_SIZE; in aio_setup_ring()
551 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); in aio_setup_ring()
554 ctx->mmap_size = 0; in aio_setup_ring()
555 aio_free_ring(ctx); in aio_setup_ring()
559 ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size, in aio_setup_ring()
563 if (IS_ERR((void *)ctx->mmap_base)) { in aio_setup_ring()
564 ctx->mmap_size = 0; in aio_setup_ring()
565 aio_free_ring(ctx); in aio_setup_ring()
569 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); in aio_setup_ring()
571 ctx->user_id = ctx->mmap_base; in aio_setup_ring()
572 ctx->nr_events = nr_events; /* trusted copy */ in aio_setup_ring()
574 ring = kmap_atomic(ctx->ring_pages[0]); in aio_setup_ring()
583 flush_dcache_page(ctx->ring_pages[0]); in aio_setup_ring()
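The kmap_atomic()/flush_dcache_page() pair above initializes the ring header that both the kernel and the mmap()ed userspace mapping read through ctx->ring_pages[0]. For orientation, that header looks roughly like the sketch below; this is paraphrased from memory of fs/aio.c's struct aio_ring, so treat the field comments as a summary rather than the authoritative definition:

struct aio_ring {
	unsigned	id;		/* kernel internal index number */
	unsigned	nr;		/* number of io_events in the ring */
	unsigned	head;		/* consumer index; advanced by userspace or by
					 * aio_read_events_ring() under ring_lock */
	unsigned	tail;		/* producer index; advanced by aio_complete()
					 * under completion_lock */
	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* size of this header */
	struct io_event	io_events[];	/* the event slots themselves */
};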
595 struct kioctx *ctx = req->ki_ctx; in kiocb_set_cancel_fn() local
608 spin_lock_irqsave(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
609 list_add_tail(&req->ki_list, &ctx->active_reqs); in kiocb_set_cancel_fn()
611 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
622 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, in free_ioctx() local
624 pr_debug("freeing %p\n", ctx); in free_ioctx()
626 aio_free_ring(ctx); in free_ioctx()
627 free_percpu(ctx->cpu); in free_ioctx()
628 percpu_ref_exit(&ctx->reqs); in free_ioctx()
629 percpu_ref_exit(&ctx->users); in free_ioctx()
630 kmem_cache_free(kioctx_cachep, ctx); in free_ioctx()
635 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); in free_ioctx_reqs() local
638 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) in free_ioctx_reqs()
639 complete(&ctx->rq_wait->comp); in free_ioctx_reqs()
642 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx); in free_ioctx_reqs()
643 queue_rcu_work(system_wq, &ctx->free_rwork); in free_ioctx_reqs()
653 struct kioctx *ctx = container_of(ref, struct kioctx, users); in free_ioctx_users() local
656 spin_lock_irq(&ctx->ctx_lock); in free_ioctx_users()
658 while (!list_empty(&ctx->active_reqs)) { in free_ioctx_users()
659 req = list_first_entry(&ctx->active_reqs, in free_ioctx_users()
665 spin_unlock_irq(&ctx->ctx_lock); in free_ioctx_users()
667 percpu_ref_kill(&ctx->reqs); in free_ioctx_users()
668 percpu_ref_put(&ctx->reqs); in free_ioctx_users()
671 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) in ioctx_add_table() argument
684 ctx->id = i; in ioctx_add_table()
685 rcu_assign_pointer(table->table[i], ctx); in ioctx_add_table()
692 ring = kmap_atomic(ctx->ring_pages[0]); in ioctx_add_table()
693 ring->id = ctx->id; in ioctx_add_table()
741 struct kioctx *ctx; in ioctx_alloc() local
771 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); in ioctx_alloc()
772 if (!ctx) in ioctx_alloc()
775 ctx->max_reqs = max_reqs; in ioctx_alloc()
777 spin_lock_init(&ctx->ctx_lock); in ioctx_alloc()
778 spin_lock_init(&ctx->completion_lock); in ioctx_alloc()
779 mutex_init(&ctx->ring_lock); in ioctx_alloc()
782 mutex_lock(&ctx->ring_lock); in ioctx_alloc()
783 init_waitqueue_head(&ctx->wait); in ioctx_alloc()
785 INIT_LIST_HEAD(&ctx->active_reqs); in ioctx_alloc()
787 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) in ioctx_alloc()
790 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) in ioctx_alloc()
793 ctx->cpu = alloc_percpu(struct kioctx_cpu); in ioctx_alloc()
794 if (!ctx->cpu) in ioctx_alloc()
797 err = aio_setup_ring(ctx, nr_events); in ioctx_alloc()
801 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); in ioctx_alloc()
802 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); in ioctx_alloc()
803 if (ctx->req_batch < 1) in ioctx_alloc()
804 ctx->req_batch = 1; in ioctx_alloc()
808 if (aio_nr + ctx->max_reqs > aio_max_nr || in ioctx_alloc()
809 aio_nr + ctx->max_reqs < aio_nr) { in ioctx_alloc()
814 aio_nr += ctx->max_reqs; in ioctx_alloc()
817 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ in ioctx_alloc()
818 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ in ioctx_alloc()
820 err = ioctx_add_table(ctx, mm); in ioctx_alloc()
825 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
828 ctx, ctx->user_id, mm, ctx->nr_events); in ioctx_alloc()
829 return ctx; in ioctx_alloc()
832 aio_nr_sub(ctx->max_reqs); in ioctx_alloc()
834 atomic_set(&ctx->dead, 1); in ioctx_alloc()
835 if (ctx->mmap_size) in ioctx_alloc()
836 vm_munmap(ctx->mmap_base, ctx->mmap_size); in ioctx_alloc()
837 aio_free_ring(ctx); in ioctx_alloc()
839 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
840 free_percpu(ctx->cpu); in ioctx_alloc()
841 percpu_ref_exit(&ctx->reqs); in ioctx_alloc()
842 percpu_ref_exit(&ctx->users); in ioctx_alloc()
843 kmem_cache_free(kioctx_cachep, ctx); in ioctx_alloc()
853 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, in kill_ioctx() argument
859 if (atomic_xchg(&ctx->dead, 1)) { in kill_ioctx()
865 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); in kill_ioctx()
866 RCU_INIT_POINTER(table->table[ctx->id], NULL); in kill_ioctx()
870 wake_up_all(&ctx->wait); in kill_ioctx()
879 aio_nr_sub(ctx->max_reqs); in kill_ioctx()
881 if (ctx->mmap_size) in kill_ioctx()
882 vm_munmap(ctx->mmap_base, ctx->mmap_size); in kill_ioctx()
884 ctx->rq_wait = wait; in kill_ioctx()
885 percpu_ref_kill(&ctx->users); in kill_ioctx()
911 struct kioctx *ctx = in exit_aio() local
914 if (!ctx) { in exit_aio()
926 ctx->mmap_size = 0; in exit_aio()
927 kill_ioctx(mm, ctx, &wait); in exit_aio()
939 static void put_reqs_available(struct kioctx *ctx, unsigned nr) in put_reqs_available() argument
945 kcpu = this_cpu_ptr(ctx->cpu); in put_reqs_available()
948 while (kcpu->reqs_available >= ctx->req_batch * 2) { in put_reqs_available()
949 kcpu->reqs_available -= ctx->req_batch; in put_reqs_available()
950 atomic_add(ctx->req_batch, &ctx->reqs_available); in put_reqs_available()
956 static bool __get_reqs_available(struct kioctx *ctx) in __get_reqs_available() argument
963 kcpu = this_cpu_ptr(ctx->cpu); in __get_reqs_available()
965 int avail = atomic_read(&ctx->reqs_available); in __get_reqs_available()
968 if (avail < ctx->req_batch) in __get_reqs_available()
970 } while (!atomic_try_cmpxchg(&ctx->reqs_available, in __get_reqs_available()
971 &avail, avail - ctx->req_batch)); in __get_reqs_available()
973 kcpu->reqs_available += ctx->req_batch; in __get_reqs_available()
990 static void refill_reqs_available(struct kioctx *ctx, unsigned head, in refill_reqs_available() argument
996 head %= ctx->nr_events; in refill_reqs_available()
1000 events_in_ring = ctx->nr_events - (head - tail); in refill_reqs_available()
1002 completed = ctx->completed_events; in refill_reqs_available()
1011 ctx->completed_events -= completed; in refill_reqs_available()
1012 put_reqs_available(ctx, completed); in refill_reqs_available()
1019 static void user_refill_reqs_available(struct kioctx *ctx) in user_refill_reqs_available() argument
1021 spin_lock_irq(&ctx->completion_lock); in user_refill_reqs_available()
1022 if (ctx->completed_events) { in user_refill_reqs_available()
1035 ring = kmap_atomic(ctx->ring_pages[0]); in user_refill_reqs_available()
1039 refill_reqs_available(ctx, head, ctx->tail); in user_refill_reqs_available()
1042 spin_unlock_irq(&ctx->completion_lock); in user_refill_reqs_available()
1045 static bool get_reqs_available(struct kioctx *ctx) in get_reqs_available() argument
1047 if (__get_reqs_available(ctx)) in get_reqs_available()
1049 user_refill_reqs_available(ctx); in get_reqs_available()
1050 return __get_reqs_available(ctx); in get_reqs_available()
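The put_reqs_available()/__get_reqs_available() pair above batches the request budget per CPU: each CPU keeps a small local cache and only touches the shared ctx->reqs_available atomic in req_batch-sized chunks (req_batch is computed in ioctx_alloc() as (nr_events - 1) / (num_possible_cpus() * 4), clamped to at least 1; e.g. 128 events on 4 CPUs gives 127 / 16 = 7). A minimal userspace model of the same accounting, using C11 atomics and a thread-local cache in place of per-CPU data; BATCH, get_req() and put_req() are made-up names for illustration:

#include <stdatomic.h>
#include <stdbool.h>

#define BATCH 8				/* stands in for ctx->req_batch */

static atomic_int reqs_available;	/* shared pool, like ctx->reqs_available;
					 * assumed to be seeded with the ring capacity */
static _Thread_local int cpu_reqs;	/* per-thread cache, like kioctx_cpu->reqs_available */

static bool get_req(void)		/* mirrors __get_reqs_available() */
{
	if (cpu_reqs == 0) {
		int avail = atomic_load(&reqs_available);
		do {
			if (avail < BATCH)
				return false;	/* shared pool exhausted */
		} while (!atomic_compare_exchange_weak(&reqs_available,
						       &avail, avail - BATCH));
		cpu_reqs += BATCH;	/* refill the local cache in one step */
	}
	cpu_reqs--;
	return true;
}

static void put_req(void)		/* mirrors put_reqs_available() */
{
	cpu_reqs++;
	while (cpu_reqs >= BATCH * 2) {	/* return surplus to the shared pool */
		cpu_reqs -= BATCH;
		atomic_fetch_add(&reqs_available, BATCH);
	}
}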
1060 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) in aio_get_req() argument
1068 if (unlikely(!get_reqs_available(ctx))) { in aio_get_req()
1073 percpu_ref_get(&ctx->reqs); in aio_get_req()
1074 req->ki_ctx = ctx; in aio_get_req()
1085 struct kioctx *ctx, *ret = NULL; in lookup_ioctx() local
1099 ctx = rcu_dereference(table->table[id]); in lookup_ioctx()
1100 if (ctx && ctx->user_id == ctx_id) { in lookup_ioctx()
1101 if (percpu_ref_tryget_live(&ctx->users)) in lookup_ioctx()
1102 ret = ctx; in lookup_ioctx()
1124 struct kioctx *ctx = iocb->ki_ctx; in aio_complete() local
1135 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_complete()
1137 tail = ctx->tail; in aio_complete()
1140 if (++tail >= ctx->nr_events) in aio_complete()
1143 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1149 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1151 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, in aio_complete()
1160 ctx->tail = tail; in aio_complete()
1162 ring = kmap_atomic(ctx->ring_pages[0]); in aio_complete()
1166 flush_dcache_page(ctx->ring_pages[0]); in aio_complete()
1168 ctx->completed_events++; in aio_complete()
1169 if (ctx->completed_events > 1) in aio_complete()
1170 refill_reqs_available(ctx, head, tail); in aio_complete()
1171 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_complete()
1191 if (waitqueue_active(&ctx->wait)) in aio_complete()
1192 wake_up(&ctx->wait); in aio_complete()
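aio_complete() above copies the completion record into the ring slot at tail and then publishes the new tail index. The record it writes is the uapi struct io_event, as I recall it from include/uapi/linux/aio_abi.h (field comments paraphrased); userspace later receives the same records through io_getevents():

struct io_event {
	__u64	data;	/* the aio_data value from the submitted iocb */
	__u64	obj;	/* userspace pointer to the iocb this event completes */
	__s64	res;	/* result code (bytes transferred or negative errno) */
	__s64	res2;	/* secondary result */
};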
1207 static long aio_read_events_ring(struct kioctx *ctx, in aio_read_events_ring() argument
1222 mutex_lock(&ctx->ring_lock); in aio_read_events_ring()
1225 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1236 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); in aio_read_events_ring()
1241 head %= ctx->nr_events; in aio_read_events_ring()
1242 tail %= ctx->nr_events; in aio_read_events_ring()
1249 avail = (head <= tail ? tail : ctx->nr_events) - head; in aio_read_events_ring()
1254 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; in aio_read_events_ring()
1272 head %= ctx->nr_events; in aio_read_events_ring()
1275 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1278 flush_dcache_page(ctx->ring_pages[0]); in aio_read_events_ring()
1282 mutex_unlock(&ctx->ring_lock); in aio_read_events_ring()
1287 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, in aio_read_events() argument
1290 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); in aio_read_events()
1295 if (unlikely(atomic_read(&ctx->dead))) in aio_read_events()
1304 static long read_events(struct kioctx *ctx, long min_nr, long nr, in read_events() argument
1325 aio_read_events(ctx, min_nr, nr, event, &ret); in read_events()
1327 wait_event_interruptible_hrtimeout(ctx->wait, in read_events()
1328 aio_read_events(ctx, min_nr, nr, event, &ret), in read_events()
1349 unsigned long ctx; in SYSCALL_DEFINE2() local
1352 ret = get_user(ctx, ctxp); in SYSCALL_DEFINE2()
1357 if (unlikely(ctx || nr_events == 0)) { in SYSCALL_DEFINE2()
1359 ctx, nr_events); in SYSCALL_DEFINE2()
1380 unsigned long ctx; in COMPAT_SYSCALL_DEFINE2() local
1383 ret = get_user(ctx, ctx32p); in COMPAT_SYSCALL_DEFINE2()
1388 if (unlikely(ctx || nr_events == 0)) { in COMPAT_SYSCALL_DEFINE2()
1390 ctx, nr_events); in COMPAT_SYSCALL_DEFINE2()
1415 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) in SYSCALL_DEFINE1() argument
1417 struct kioctx *ioctx = lookup_ioctx(ctx); in SYSCALL_DEFINE1()
1447 struct kioctx *ctx = iocb->ki_ctx; in aio_remove_iocb() local
1450 spin_lock_irqsave(&ctx->ctx_lock, flags); in aio_remove_iocb()
1452 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in aio_remove_iocb()
1706 struct kioctx *ctx = iocb->ki_ctx; in aio_poll_complete_work() local
1719 spin_lock_irq(&ctx->ctx_lock); in aio_poll_complete_work()
1733 spin_unlock_irq(&ctx->ctx_lock); in aio_poll_complete_work()
1741 spin_unlock_irq(&ctx->ctx_lock); in aio_poll_complete_work()
1791 struct kioctx *ctx = iocb->ki_ctx; in aio_poll_wake() local
1801 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in aio_poll_wake()
1873 struct kioctx *ctx = aiocb->ki_ctx; in aio_poll() local
1905 spin_lock_irq(&ctx->ctx_lock); in aio_poll()
1930 list_add_tail(&aiocb->ki_list, &ctx->active_reqs); in aio_poll()
1940 spin_unlock_irq(&ctx->ctx_lock); in aio_poll()
1946 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, in __io_submit_one() argument
2000 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, in io_submit_one() argument
2026 req = aio_get_req(ctx); in io_submit_one()
2030 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); in io_submit_one()
2042 put_reqs_available(ctx, 1); in io_submit_one()
2062 struct kioctx *ctx; in SYSCALL_DEFINE3() local
2070 ctx = lookup_ioctx(ctx_id); in SYSCALL_DEFINE3()
2071 if (unlikely(!ctx)) { in SYSCALL_DEFINE3()
2076 if (nr > ctx->nr_events) in SYSCALL_DEFINE3()
2077 nr = ctx->nr_events; in SYSCALL_DEFINE3()
2089 ret = io_submit_one(ctx, user_iocb, false); in SYSCALL_DEFINE3()
2096 percpu_ref_put(&ctx->users); in SYSCALL_DEFINE3()
2104 struct kioctx *ctx; in COMPAT_SYSCALL_DEFINE3() local
2112 ctx = lookup_ioctx(ctx_id); in COMPAT_SYSCALL_DEFINE3()
2113 if (unlikely(!ctx)) { in COMPAT_SYSCALL_DEFINE3()
2118 if (nr > ctx->nr_events) in COMPAT_SYSCALL_DEFINE3()
2119 nr = ctx->nr_events; in COMPAT_SYSCALL_DEFINE3()
2131 ret = io_submit_one(ctx, compat_ptr(user_iocb), true); in COMPAT_SYSCALL_DEFINE3()
2138 percpu_ref_put(&ctx->users); in COMPAT_SYSCALL_DEFINE3()
2156 struct kioctx *ctx; in SYSCALL_DEFINE3() local
2167 ctx = lookup_ioctx(ctx_id); in SYSCALL_DEFINE3()
2168 if (unlikely(!ctx)) in SYSCALL_DEFINE3()
2171 spin_lock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
2173 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { in SYSCALL_DEFINE3()
2180 spin_unlock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
2191 percpu_ref_put(&ctx->users); in SYSCALL_DEFINE3()
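Taken together, the functions indexed above are the kernel side of the io_setup()/io_submit()/io_getevents()/io_destroy() syscalls (ioctx_alloc(), io_submit_one(), read_events() and kill_ioctx() respectively, with aio_poll() handling IOCB_CMD_POLL submissions). A minimal userspace sketch of that call sequence, using the raw syscalls and the uapi types from <linux/aio_abi.h>; ./data is a placeholder input file and error handling is trimmed for brevity:

#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	aio_context_t ctx = 0;			/* must be zero before io_setup(),
						 * matching the (ctx || nr_events == 0) check above */
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	char buf[4096];
	int fd = open("./data", O_RDONLY);	/* hypothetical input file */

	if (syscall(SYS_io_setup, 128, &ctx) < 0)	/* -> ioctx_alloc(): kioctx + ring */
		return 1;

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PREAD;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)	/* -> io_submit_one() */
		return 1;
	if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)	/* -> read_events() */
		printf("read %lld bytes\n", (long long)ev.res);

	syscall(SYS_io_destroy, ctx);			/* -> kill_ioctx() */
	close(fd);
	return 0;
}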