
Lines matching refs:ctx in fs/aio.c

236 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)  in aio_private_file()  argument
244 inode->i_mapping->private_data = ctx; in aio_private_file()
283 static void put_aio_ring_file(struct kioctx *ctx) in put_aio_ring_file() argument
285 struct file *aio_ring_file = ctx->aio_ring_file; in put_aio_ring_file()
295 ctx->aio_ring_file = NULL; in put_aio_ring_file()
302 static void aio_free_ring(struct kioctx *ctx) in aio_free_ring() argument
309 put_aio_ring_file(ctx); in aio_free_ring()
311 for (i = 0; i < ctx->nr_pages; i++) { in aio_free_ring()
314 page_count(ctx->ring_pages[i])); in aio_free_ring()
315 page = ctx->ring_pages[i]; in aio_free_ring()
318 ctx->ring_pages[i] = NULL; in aio_free_ring()
322 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { in aio_free_ring()
323 kfree(ctx->ring_pages); in aio_free_ring()
324 ctx->ring_pages = NULL; in aio_free_ring()
342 struct kioctx *ctx; in aio_ring_mremap() local
344 ctx = rcu_dereference(table->table[i]); in aio_ring_mremap()
345 if (ctx && ctx->aio_ring_file == file) { in aio_ring_mremap()
346 if (!atomic_read(&ctx->dead)) { in aio_ring_mremap()
347 ctx->user_id = ctx->mmap_base = vma->vm_start; in aio_ring_mremap()
384 struct kioctx *ctx; in aio_migratepage() local
401 ctx = mapping->private_data; in aio_migratepage()
402 if (!ctx) { in aio_migratepage()
411 if (!mutex_trylock(&ctx->ring_lock)) { in aio_migratepage()
417 if (idx < (pgoff_t)ctx->nr_pages) { in aio_migratepage()
419 if (ctx->ring_pages[idx] != old) in aio_migratepage()
441 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_migratepage()
443 BUG_ON(ctx->ring_pages[idx] != old); in aio_migratepage()
444 ctx->ring_pages[idx] = new; in aio_migratepage()
445 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_migratepage()
451 mutex_unlock(&ctx->ring_lock); in aio_migratepage()
465 static int aio_setup_ring(struct kioctx *ctx, unsigned int nr_events) in aio_setup_ring() argument
484 file = aio_private_file(ctx, nr_pages); in aio_setup_ring()
486 ctx->aio_ring_file = NULL; in aio_setup_ring()
490 ctx->aio_ring_file = file; in aio_setup_ring()
494 ctx->ring_pages = ctx->internal_pages; in aio_setup_ring()
496 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), in aio_setup_ring()
498 if (!ctx->ring_pages) { in aio_setup_ring()
499 put_aio_ring_file(ctx); in aio_setup_ring()
515 ctx->ring_pages[i] = page; in aio_setup_ring()
517 ctx->nr_pages = i; in aio_setup_ring()
520 aio_free_ring(ctx); in aio_setup_ring()
524 ctx->mmap_size = nr_pages * PAGE_SIZE; in aio_setup_ring()
525 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); in aio_setup_ring()
528 ctx->mmap_size = 0; in aio_setup_ring()
529 aio_free_ring(ctx); in aio_setup_ring()
533 ctx->mmap_base = do_mmap(ctx->aio_ring_file, 0, ctx->mmap_size, in aio_setup_ring()
537 if (IS_ERR((void *)ctx->mmap_base)) { in aio_setup_ring()
538 ctx->mmap_size = 0; in aio_setup_ring()
539 aio_free_ring(ctx); in aio_setup_ring()
543 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); in aio_setup_ring()
545 ctx->user_id = ctx->mmap_base; in aio_setup_ring()
546 ctx->nr_events = nr_events; /* trusted copy */ in aio_setup_ring()
548 ring = kmap_atomic(ctx->ring_pages[0]); in aio_setup_ring()
557 flush_dcache_page(ctx->ring_pages[0]); in aio_setup_ring()
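The matches above cover only the ctx-referencing lines of aio_setup_ring(); for orientation, here is a hedged sketch of the ring header that lives at the start of ctx->ring_pages[0] and is later stamped with ctx->id in ioctx_add_table(). The layout is reproduced from memory of fs/aio.c, is kernel-internal, and may differ between versions, so treat it as illustrative rather than ABI documentation.

```c
/*
 * Hedged sketch: the ring header mapped at ctx->ring_pages[0] by
 * aio_setup_ring().  Kernel-internal layout, reproduced from memory;
 * may vary across kernel versions.
 */
#include <linux/aio_abi.h>	/* struct io_event */

struct aio_ring {
	unsigned	id;	/* kernel internal index; ioctx_add_table() sets ring->id = ctx->id */
	unsigned	nr;	/* number of io_events (user copy of nr_events) */
	unsigned	head;	/* consumer index, advanced in aio_read_events_ring() */
	unsigned	tail;	/* producer index, advanced in aio_complete() */
	unsigned	magic;
	unsigned	compat_features;
	unsigned	incompat_features;
	unsigned	header_length;	/* sizeof(struct aio_ring) */
	struct io_event	io_events[];	/* events follow the header on page 0 */
};
```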
569 struct kioctx *ctx = req->ki_ctx; in kiocb_set_cancel_fn() local
582 spin_lock_irqsave(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
583 list_add_tail(&req->ki_list, &ctx->active_reqs); in kiocb_set_cancel_fn()
585 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in kiocb_set_cancel_fn()
596 struct kioctx *ctx = container_of(to_rcu_work(work), struct kioctx, in free_ioctx() local
598 pr_debug("freeing %p\n", ctx); in free_ioctx()
600 aio_free_ring(ctx); in free_ioctx()
601 free_percpu(ctx->cpu); in free_ioctx()
602 percpu_ref_exit(&ctx->reqs); in free_ioctx()
603 percpu_ref_exit(&ctx->users); in free_ioctx()
604 kmem_cache_free(kioctx_cachep, ctx); in free_ioctx()
609 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); in free_ioctx_reqs() local
612 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) in free_ioctx_reqs()
613 complete(&ctx->rq_wait->comp); in free_ioctx_reqs()
616 INIT_RCU_WORK(&ctx->free_rwork, free_ioctx); in free_ioctx_reqs()
617 queue_rcu_work(system_wq, &ctx->free_rwork); in free_ioctx_reqs()
627 struct kioctx *ctx = container_of(ref, struct kioctx, users); in free_ioctx_users() local
630 spin_lock_irq(&ctx->ctx_lock); in free_ioctx_users()
632 while (!list_empty(&ctx->active_reqs)) { in free_ioctx_users()
633 req = list_first_entry(&ctx->active_reqs, in free_ioctx_users()
639 spin_unlock_irq(&ctx->ctx_lock); in free_ioctx_users()
641 percpu_ref_kill(&ctx->reqs); in free_ioctx_users()
642 percpu_ref_put(&ctx->reqs); in free_ioctx_users()
645 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) in ioctx_add_table() argument
658 ctx->id = i; in ioctx_add_table()
659 rcu_assign_pointer(table->table[i], ctx); in ioctx_add_table()
666 ring = kmap_atomic(ctx->ring_pages[0]); in ioctx_add_table()
667 ring->id = ctx->id; in ioctx_add_table()
716 struct kioctx *ctx; in ioctx_alloc() local
746 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); in ioctx_alloc()
747 if (!ctx) in ioctx_alloc()
750 ctx->max_reqs = max_reqs; in ioctx_alloc()
752 spin_lock_init(&ctx->ctx_lock); in ioctx_alloc()
753 spin_lock_init(&ctx->completion_lock); in ioctx_alloc()
754 mutex_init(&ctx->ring_lock); in ioctx_alloc()
757 mutex_lock(&ctx->ring_lock); in ioctx_alloc()
758 init_waitqueue_head(&ctx->wait); in ioctx_alloc()
760 INIT_LIST_HEAD(&ctx->active_reqs); in ioctx_alloc()
762 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) in ioctx_alloc()
765 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) in ioctx_alloc()
768 ctx->cpu = alloc_percpu(struct kioctx_cpu); in ioctx_alloc()
769 if (!ctx->cpu) in ioctx_alloc()
772 err = aio_setup_ring(ctx, nr_events); in ioctx_alloc()
776 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); in ioctx_alloc()
777 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); in ioctx_alloc()
778 if (ctx->req_batch < 1) in ioctx_alloc()
779 ctx->req_batch = 1; in ioctx_alloc()
783 if (aio_nr + ctx->max_reqs > aio_max_nr || in ioctx_alloc()
784 aio_nr + ctx->max_reqs < aio_nr) { in ioctx_alloc()
789 aio_nr += ctx->max_reqs; in ioctx_alloc()
792 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ in ioctx_alloc()
793 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ in ioctx_alloc()
795 err = ioctx_add_table(ctx, mm); in ioctx_alloc()
800 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
803 ctx, ctx->user_id, mm, ctx->nr_events); in ioctx_alloc()
804 return ctx; in ioctx_alloc()
807 aio_nr_sub(ctx->max_reqs); in ioctx_alloc()
809 atomic_set(&ctx->dead, 1); in ioctx_alloc()
810 if (ctx->mmap_size) in ioctx_alloc()
811 vm_munmap(ctx->mmap_base, ctx->mmap_size); in ioctx_alloc()
812 aio_free_ring(ctx); in ioctx_alloc()
814 mutex_unlock(&ctx->ring_lock); in ioctx_alloc()
815 free_percpu(ctx->cpu); in ioctx_alloc()
816 percpu_ref_exit(&ctx->reqs); in ioctx_alloc()
817 percpu_ref_exit(&ctx->users); in ioctx_alloc()
818 kmem_cache_free(kioctx_cachep, ctx); in ioctx_alloc()
828 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, in kill_ioctx() argument
834 if (atomic_xchg(&ctx->dead, 1)) { in kill_ioctx()
840 WARN_ON(ctx != rcu_access_pointer(table->table[ctx->id])); in kill_ioctx()
841 RCU_INIT_POINTER(table->table[ctx->id], NULL); in kill_ioctx()
845 wake_up_all(&ctx->wait); in kill_ioctx()
854 aio_nr_sub(ctx->max_reqs); in kill_ioctx()
856 if (ctx->mmap_size) in kill_ioctx()
857 vm_munmap(ctx->mmap_base, ctx->mmap_size); in kill_ioctx()
859 ctx->rq_wait = wait; in kill_ioctx()
860 percpu_ref_kill(&ctx->users); in kill_ioctx()
886 struct kioctx *ctx = in exit_aio() local
889 if (!ctx) { in exit_aio()
901 ctx->mmap_size = 0; in exit_aio()
902 kill_ioctx(mm, ctx, &wait); in exit_aio()
914 static void put_reqs_available(struct kioctx *ctx, unsigned nr) in put_reqs_available() argument
920 kcpu = this_cpu_ptr(ctx->cpu); in put_reqs_available()
923 while (kcpu->reqs_available >= ctx->req_batch * 2) { in put_reqs_available()
924 kcpu->reqs_available -= ctx->req_batch; in put_reqs_available()
925 atomic_add(ctx->req_batch, &ctx->reqs_available); in put_reqs_available()
931 static bool __get_reqs_available(struct kioctx *ctx) in __get_reqs_available() argument
938 kcpu = this_cpu_ptr(ctx->cpu); in __get_reqs_available()
940 int old, avail = atomic_read(&ctx->reqs_available); in __get_reqs_available()
943 if (avail < ctx->req_batch) in __get_reqs_available()
947 avail = atomic_cmpxchg(&ctx->reqs_available, in __get_reqs_available()
948 avail, avail - ctx->req_batch); in __get_reqs_available()
951 kcpu->reqs_available += ctx->req_batch; in __get_reqs_available()
968 static void refill_reqs_available(struct kioctx *ctx, unsigned head, in refill_reqs_available() argument
974 head %= ctx->nr_events; in refill_reqs_available()
978 events_in_ring = ctx->nr_events - (head - tail); in refill_reqs_available()
980 completed = ctx->completed_events; in refill_reqs_available()
989 ctx->completed_events -= completed; in refill_reqs_available()
990 put_reqs_available(ctx, completed); in refill_reqs_available()
997 static void user_refill_reqs_available(struct kioctx *ctx) in user_refill_reqs_available() argument
999 spin_lock_irq(&ctx->completion_lock); in user_refill_reqs_available()
1000 if (ctx->completed_events) { in user_refill_reqs_available()
1013 ring = kmap_atomic(ctx->ring_pages[0]); in user_refill_reqs_available()
1017 refill_reqs_available(ctx, head, ctx->tail); in user_refill_reqs_available()
1020 spin_unlock_irq(&ctx->completion_lock); in user_refill_reqs_available()
1023 static bool get_reqs_available(struct kioctx *ctx) in get_reqs_available() argument
1025 if (__get_reqs_available(ctx)) in get_reqs_available()
1027 user_refill_reqs_available(ctx); in get_reqs_available()
1028 return __get_reqs_available(ctx); in get_reqs_available()
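The put_reqs_available() / __get_reqs_available() matches above implement per-CPU batching of the shared reqs_available counter. Below is a hedged user-space model of that scheme, with a thread-local cache standing in for the kioctx_cpu counters; the names pool, cache, BATCH, get_req and put_req are illustrative, not taken from fs/aio.c.

```c
/* Hedged user-space model of the reqs_available batching shown above. */
#include <stdatomic.h>
#include <stdbool.h>

#define BATCH 8			/* plays the role of ctx->req_batch */

static atomic_int pool;		/* plays the role of ctx->reqs_available */
static _Thread_local int cache;	/* plays the role of kcpu->reqs_available */

/* __get_reqs_available(): take one request, refilling the local cache
 * from the shared pool in BATCH-sized chunks via compare-and-swap. */
static bool get_req(void)
{
	if (cache == 0) {
		int avail = atomic_load(&pool);
		do {
			if (avail < BATCH)
				return false;	/* pool too low to grab a batch */
		} while (!atomic_compare_exchange_weak(&pool, &avail,
						       avail - BATCH));
		cache += BATCH;
	}
	cache--;
	return true;
}

/* put_reqs_available(): return requests locally, spilling back to the
 * shared pool once the cache exceeds two batches. */
static void put_req(unsigned nr)
{
	cache += nr;
	while (cache >= BATCH * 2) {
		cache -= BATCH;
		atomic_fetch_add(&pool, BATCH);
	}
}
```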
1038 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) in aio_get_req() argument
1046 if (unlikely(!get_reqs_available(ctx))) { in aio_get_req()
1051 percpu_ref_get(&ctx->reqs); in aio_get_req()
1052 req->ki_ctx = ctx; in aio_get_req()
1063 struct kioctx *ctx, *ret = NULL; in lookup_ioctx() local
1077 ctx = rcu_dereference(table->table[id]); in lookup_ioctx()
1078 if (ctx && ctx->user_id == ctx_id) { in lookup_ioctx()
1079 if (percpu_ref_tryget_live(&ctx->users)) in lookup_ioctx()
1080 ret = ctx; in lookup_ioctx()
1102 struct kioctx *ctx = iocb->ki_ctx; in aio_complete() local
1113 spin_lock_irqsave(&ctx->completion_lock, flags); in aio_complete()
1115 tail = ctx->tail; in aio_complete()
1118 if (++tail >= ctx->nr_events) in aio_complete()
1121 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1127 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); in aio_complete()
1129 pr_debug("%p[%u]: %p: %p %Lx %Lx %Lx\n", ctx, tail, iocb, in aio_complete()
1138 ctx->tail = tail; in aio_complete()
1140 ring = kmap_atomic(ctx->ring_pages[0]); in aio_complete()
1144 flush_dcache_page(ctx->ring_pages[0]); in aio_complete()
1146 ctx->completed_events++; in aio_complete()
1147 if (ctx->completed_events > 1) in aio_complete()
1148 refill_reqs_available(ctx, head, tail); in aio_complete()
1149 spin_unlock_irqrestore(&ctx->completion_lock, flags); in aio_complete()
1169 if (waitqueue_active(&ctx->wait)) in aio_complete()
1170 wake_up(&ctx->wait); in aio_complete()
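A hedged, self-contained sketch of the index arithmetic aio_complete() uses above: event slots are offset past the ring header on page 0, and the producer index wraps at nr_events. The sizes and helper names here are assumptions for illustration; verify against your kernel's fs/aio.c.

```c
/* Hedged sketch of the producer-side ring index arithmetic. */
#include <stddef.h>

#define PAGE_SZ			4096UL
#define EV_SIZE			32UL	/* assumed sizeof(struct io_event) */
#define RING_HDR		32UL	/* assumed sizeof(struct aio_ring) header */
#define EVENTS_PER_PAGE		(PAGE_SZ / EV_SIZE)
#define EVENTS_FIRST_PAGE	((PAGE_SZ - RING_HDR) / EV_SIZE)
#define EVENTS_OFFSET		(EVENTS_PER_PAGE - EVENTS_FIRST_PAGE)

/* Return the ring page index and byte offset of event slot `tail`. */
static void slot_location(unsigned tail, unsigned *page_idx, size_t *offset)
{
	unsigned long pos = tail + EVENTS_OFFSET;	/* skip the header slot */

	*page_idx = pos / EVENTS_PER_PAGE;		/* which ctx->ring_pages[] entry */
	*offset = (pos % EVENTS_PER_PAGE) * EV_SIZE;	/* byte offset within that page */
}

/* Advance the producer index with wraparound, as in "if (++tail >= nr_events)". */
static unsigned advance_tail(unsigned tail, unsigned nr_events)
{
	return (tail + 1 >= nr_events) ? 0 : tail + 1;
}
```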
1185 static long aio_read_events_ring(struct kioctx *ctx, in aio_read_events_ring() argument
1200 mutex_lock(&ctx->ring_lock); in aio_read_events_ring()
1203 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1214 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); in aio_read_events_ring()
1219 head %= ctx->nr_events; in aio_read_events_ring()
1220 tail %= ctx->nr_events; in aio_read_events_ring()
1227 avail = (head <= tail ? tail : ctx->nr_events) - head; in aio_read_events_ring()
1232 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; in aio_read_events_ring()
1250 head %= ctx->nr_events; in aio_read_events_ring()
1253 ring = kmap_atomic(ctx->ring_pages[0]); in aio_read_events_ring()
1256 flush_dcache_page(ctx->ring_pages[0]); in aio_read_events_ring()
1260 mutex_unlock(&ctx->ring_lock); in aio_read_events_ring()
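Similarly, a hedged sketch of the consumer-side arithmetic in aio_read_events_ring() above: a single copy can only run to the end of the ring or up to tail, whichever comes first. The helper name is illustrative.

```c
/* Hedged sketch: how many events can be drained in one contiguous copy. */
static unsigned ring_avail_contig(unsigned head, unsigned tail, unsigned nr_events)
{
	head %= nr_events;
	tail %= nr_events;
	if (head == tail)
		return 0;				/* ring empty */
	return (head <= tail ? tail : nr_events) - head; /* run ends at tail or ring end */
}
```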
1265 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, in aio_read_events() argument
1268 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); in aio_read_events()
1273 if (unlikely(atomic_read(&ctx->dead))) in aio_read_events()
1282 static long read_events(struct kioctx *ctx, long min_nr, long nr, in read_events() argument
1303 aio_read_events(ctx, min_nr, nr, event, &ret); in read_events()
1305 wait_event_interruptible_hrtimeout(ctx->wait, in read_events()
1306 aio_read_events(ctx, min_nr, nr, event, &ret), in read_events()
1327 unsigned long ctx; in SYSCALL_DEFINE2() local
1330 ret = get_user(ctx, ctxp); in SYSCALL_DEFINE2()
1335 if (unlikely(ctx || nr_events == 0)) { in SYSCALL_DEFINE2()
1337 ctx, nr_events); in SYSCALL_DEFINE2()
1358 unsigned long ctx; in COMPAT_SYSCALL_DEFINE2() local
1361 ret = get_user(ctx, ctx32p); in COMPAT_SYSCALL_DEFINE2()
1366 if (unlikely(ctx || nr_events == 0)) { in COMPAT_SYSCALL_DEFINE2()
1368 ctx, nr_events); in COMPAT_SYSCALL_DEFINE2()
1393 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) in SYSCALL_DEFINE1() argument
1395 struct kioctx *ioctx = lookup_ioctx(ctx); in SYSCALL_DEFINE1()
1425 struct kioctx *ctx = iocb->ki_ctx; in aio_remove_iocb() local
1428 spin_lock_irqsave(&ctx->ctx_lock, flags); in aio_remove_iocb()
1430 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in aio_remove_iocb()
1686 struct kioctx *ctx = iocb->ki_ctx; in aio_poll_complete_work() local
1699 spin_lock_irq(&ctx->ctx_lock); in aio_poll_complete_work()
1713 spin_unlock_irq(&ctx->ctx_lock); in aio_poll_complete_work()
1721 spin_unlock_irq(&ctx->ctx_lock); in aio_poll_complete_work()
1771 struct kioctx *ctx = iocb->ki_ctx; in aio_poll_wake() local
1781 spin_unlock_irqrestore(&ctx->ctx_lock, flags); in aio_poll_wake()
1853 struct kioctx *ctx = aiocb->ki_ctx; in aio_poll() local
1885 spin_lock_irq(&ctx->ctx_lock); in aio_poll()
1910 list_add_tail(&aiocb->ki_list, &ctx->active_reqs); in aio_poll()
1920 spin_unlock_irq(&ctx->ctx_lock); in aio_poll()
1926 static int __io_submit_one(struct kioctx *ctx, const struct iocb *iocb, in __io_submit_one() argument
1980 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, in io_submit_one() argument
2006 req = aio_get_req(ctx); in io_submit_one()
2010 err = __io_submit_one(ctx, &iocb, user_iocb, req, compat); in io_submit_one()
2022 put_reqs_available(ctx, 1); in io_submit_one()
2042 struct kioctx *ctx; in SYSCALL_DEFINE3() local
2050 ctx = lookup_ioctx(ctx_id); in SYSCALL_DEFINE3()
2051 if (unlikely(!ctx)) { in SYSCALL_DEFINE3()
2056 if (nr > ctx->nr_events) in SYSCALL_DEFINE3()
2057 nr = ctx->nr_events; in SYSCALL_DEFINE3()
2069 ret = io_submit_one(ctx, user_iocb, false); in SYSCALL_DEFINE3()
2076 percpu_ref_put(&ctx->users); in SYSCALL_DEFINE3()
2084 struct kioctx *ctx; in COMPAT_SYSCALL_DEFINE3() local
2092 ctx = lookup_ioctx(ctx_id); in COMPAT_SYSCALL_DEFINE3()
2093 if (unlikely(!ctx)) { in COMPAT_SYSCALL_DEFINE3()
2098 if (nr > ctx->nr_events) in COMPAT_SYSCALL_DEFINE3()
2099 nr = ctx->nr_events; in COMPAT_SYSCALL_DEFINE3()
2111 ret = io_submit_one(ctx, compat_ptr(user_iocb), true); in COMPAT_SYSCALL_DEFINE3()
2118 percpu_ref_put(&ctx->users); in COMPAT_SYSCALL_DEFINE3()
2136 struct kioctx *ctx; in SYSCALL_DEFINE3() local
2147 ctx = lookup_ioctx(ctx_id); in SYSCALL_DEFINE3()
2148 if (unlikely(!ctx)) in SYSCALL_DEFINE3()
2151 spin_lock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
2153 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { in SYSCALL_DEFINE3()
2160 spin_unlock_irq(&ctx->ctx_lock); in SYSCALL_DEFINE3()
2171 percpu_ref_put(&ctx->users); in SYSCALL_DEFINE3()
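Finally, a hedged user-space walk through the syscalls whose ctx handling is indexed above (io_setup, io_submit, io_getevents, io_destroy), using raw syscall numbers and the <linux/aio_abi.h> types. The file path and sizes are illustrative and error handling is abbreviated; io_cancel(), whose matches close the listing, would scan ctx->active_reqs for the submitted iocb.

```c
/* Hedged sketch: minimal raw use of the AIO syscalls indexed above. */
#include <linux/aio_abi.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	aio_context_t ctx = 0;		/* must be zero, see the check in io_setup() */
	char buf[4096];
	struct io_event ev;
	struct iocb cb;
	struct iocb *cbs[1] = { &cb };
	int fd;

	if (syscall(SYS_io_setup, 128, &ctx) < 0)	/* ioctx_alloc() path */
		return 1;

	fd = open("/tmp/aio-example", O_RDWR | O_CREAT | O_TRUNC, 0600);
	if (fd < 0)
		return 1;

	memset(&cb, 0, sizeof(cb));
	memset(buf, 'x', sizeof(buf));
	cb.aio_fildes = fd;
	cb.aio_lio_opcode = IOCB_CMD_PWRITE;
	cb.aio_buf = (__u64)(unsigned long)buf;
	cb.aio_nbytes = sizeof(buf);
	cb.aio_offset = 0;

	if (syscall(SYS_io_submit, ctx, 1, cbs) != 1)	/* io_submit_one() path */
		return 1;

	if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1) /* read_events() path */
		printf("res=%lld\n", (long long)ev.res);

	syscall(SYS_io_destroy, ctx);	/* kill_ioctx() / percpu_ref teardown */
	close(fd);
	return 0;
}
```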