Lines matching refs: nr_pages

Identifier cross-reference: each entry gives the referencing source line number, the matching line of code, and the enclosing function, with nr_pages tagged as an argument or a local. The functions listed look like the io_uring fixed-buffer accounting and registration path (fs/io_uring.c in kernels around v5.12).
8978 unsigned long nr_pages) in __io_unaccount_mem() argument
8980 atomic_long_sub(nr_pages, &user->locked_vm); in __io_unaccount_mem()
8984 unsigned long nr_pages) in __io_account_mem() argument
8993 new_pages = cur_pages + nr_pages; in __io_account_mem()
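The two fragments above suggest the usual pair of locked_vm primitives: unaccounting is a plain atomic_long_sub(), while accounting retries a compare-and-swap against the RLIMIT_MEMLOCK ceiling. A minimal sketch of that shape, assuming kernel context (the atomic_long_cmpxchg() loop and the rlimit() cap are inferred from line 8993 and the usual kernel idiom, not shown verbatim above):

static inline void __io_unaccount_mem(struct user_struct *user,
				      unsigned long nr_pages)
{
	atomic_long_sub(nr_pages, &user->locked_vm);
}

static inline int __io_account_mem(struct user_struct *user,
				   unsigned long nr_pages)
{
	unsigned long page_limit, cur_pages, new_pages;

	/* don't allow more pages than RLIMIT_MEMLOCK permits us to lock */
	page_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	do {
		cur_pages = atomic_long_read(&user->locked_vm);
		new_pages = cur_pages + nr_pages;	/* line 8993 above */
		if (new_pages > page_limit)
			return -ENOMEM;
	} while (atomic_long_cmpxchg(&user->locked_vm, cur_pages,
				     new_pages) != cur_pages);

	return 0;
}

The cmpxchg loop makes the limit check and the increment a single atomic step, so two racing registrations cannot both slip under the rlimit.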
9002 static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) in io_unaccount_mem() argument
9005 __io_unaccount_mem(ctx->user, nr_pages); in io_unaccount_mem()
9008 atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm); in io_unaccount_mem()
9011 static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages) in io_account_mem() argument
9016 ret = __io_account_mem(ctx->user, nr_pages); in io_account_mem()
9022 atomic64_add(nr_pages, &ctx->mm_account->pinned_vm); in io_account_mem()
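The wrappers at 9002/9011 appear to fan out to two accounting domains: the per-user locked_vm primitives above (presumably only when the ring has a ctx->user) and a pinned_vm counter on the mm that owns the ring. A hedged sketch of that structure; the ctx->user and ctx->mm_account guards are assumptions consistent with the atomics shown at 9008 and 9022:

static void io_unaccount_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	if (ctx->user)
		__io_unaccount_mem(ctx->user, nr_pages);

	if (ctx->mm_account)
		atomic64_sub(nr_pages, &ctx->mm_account->pinned_vm);
}

static int io_account_mem(struct io_ring_ctx *ctx, unsigned long nr_pages)
{
	int ret;

	if (ctx->user) {
		ret = __io_account_mem(ctx->user, nr_pages);
		if (ret)
			return ret;
	}

	if (ctx->mm_account)
		atomic64_add(nr_pages, &ctx->mm_account->pinned_vm);

	return 0;
}

Note the ordering: the fallible locked_vm charge happens first, so pinned_vm is only bumped once the rlimit check has passed.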
9164 int nr_pages, struct page *hpage) in headpage_already_acct() argument
9169 for (i = 0; i < nr_pages; i++) { in headpage_already_acct()
9192 int nr_pages, struct io_mapped_ubuf *imu, in io_buffer_account_pin() argument
9198 for (i = 0; i < nr_pages; i++) { in io_buffer_account_pin()
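headpage_already_acct() (9164) and io_buffer_account_pin() (9192) cooperate to avoid double-charging huge pages: a compound page is charged once, for its full size, no matter how many of its pinned subpages appear in this buffer or in an earlier registered one. A sketch of the presumed logic (details such as ctx->user_bufs being a pointer table match kernels where the imu is struct_size()-allocated, as at 9256 below; treat it as illustrative):

static bool headpage_already_acct(struct io_ring_ctx *ctx, struct page **pages,
				  int nr_pages, struct page *hpage)
{
	int i, j;

	/* check the pages walked so far in this registration */
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i]))
			continue;
		if (compound_head(pages[i]) == hpage)
			return true;
	}

	/* check previously registered buffers */
	for (j = 0; j < ctx->nr_user_bufs; j++) {
		struct io_mapped_ubuf *imu = ctx->user_bufs[j];

		for (i = 0; i < imu->nr_bvecs; i++) {
			if (imu->bvec[i].bv_page == hpage)
				return true;
		}
	}
	return false;
}

static int io_buffer_account_pin(struct io_ring_ctx *ctx, struct page **pages,
				 int nr_pages, struct io_mapped_ubuf *imu,
				 struct page **last_hpage)
{
	int i, ret;

	imu->acct_pages = 0;
	for (i = 0; i < nr_pages; i++) {
		if (!PageCompound(pages[i])) {
			imu->acct_pages++;	/* plain page: charge one */
		} else {
			struct page *hpage = compound_head(pages[i]);

			if (hpage == *last_hpage)
				continue;	/* same huge page as last entry */
			*last_hpage = hpage;
			if (headpage_already_acct(ctx, pages, i, hpage))
				continue;	/* head already charged */
			imu->acct_pages += page_size(hpage) >> PAGE_SHIFT;
		}
	}

	if (!imu->acct_pages)
		return 0;

	ret = io_account_mem(ctx, imu->acct_pages);
	if (ret)
		imu->acct_pages = 0;
	return ret;
}

Passing i (pages walked so far) rather than nr_pages into headpage_already_acct() keeps the dedup scan to entries that have already been decided.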
9232 int ret, pret, nr_pages, i; in io_sqe_buffer_register() local
9242 nr_pages = end - start; in io_sqe_buffer_register()
9247 pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL); in io_sqe_buffer_register()
9251 vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *), in io_sqe_buffer_register()
9256 imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL); in io_sqe_buffer_register()
9262 pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, in io_sqe_buffer_register()
9264 if (pret == nr_pages) { in io_sqe_buffer_register()
9268 for (i = 0; i < nr_pages; i++) { in io_sqe_buffer_register()
9302 for (i = 0; i < nr_pages; i++) { in io_sqe_buffer_register()
9315 imu->nr_bvecs = nr_pages; in io_sqe_buffer_register()
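Stitched together, the io_sqe_buffer_register() fragments (9232-9315) read as the classic pin-and-describe flow: page-align the iovec, allocate the page/vma scratch arrays and the imu with its flexible bvec[] tail, pin with FOLL_WRITE | FOLL_LONGTERM, reject file-backed (non-hugetlb) VMAs, account via io_buffer_account_pin(), then fill one bio_vec per pinned page. A reconstruction under those assumptions (circa-v5.12 signatures; the mmap lock calls, error paths, and the ubuf_end field are inferred, and pin_user_pages() still took a vmas argument in that era):

static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov,
				  struct io_mapped_ubuf **pimu,
				  struct page **last_hpage)
{
	struct io_mapped_ubuf *imu = NULL;
	struct vm_area_struct **vmas = NULL;
	struct page **pages = NULL;
	unsigned long off, start, end, ubuf;
	size_t size;
	int ret, pret, nr_pages, i;

	/* round the user range out to whole pages */
	ubuf = (unsigned long) iov->iov_base;
	end = (ubuf + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	start = ubuf >> PAGE_SHIFT;
	nr_pages = end - start;			/* line 9242 */

	ret = -ENOMEM;
	pages = kvmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto done;
	vmas = kvmalloc_array(nr_pages, sizeof(struct vm_area_struct *),
			      GFP_KERNEL);
	if (!vmas)
		goto done;
	imu = kvmalloc(struct_size(imu, bvec, nr_pages), GFP_KERNEL);
	if (!imu)
		goto done;

	ret = 0;
	mmap_read_lock(current->mm);
	pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM,
			      pages, vmas);
	if (pret == nr_pages) {
		/* only anonymous or hugetlbfs-backed memory is allowed */
		for (i = 0; i < nr_pages; i++) {
			struct vm_area_struct *vma = vmas[i];

			if (vma->vm_file &&
			    !is_file_hugepages(vma->vm_file)) {
				ret = -EOPNOTSUPP;
				break;
			}
		}
	} else {
		ret = pret < 0 ? pret : -EFAULT;
	}
	mmap_read_unlock(current->mm);
	if (ret) {
		if (pret > 0)
			unpin_user_pages(pages, pret);
		goto done;
	}

	ret = io_buffer_account_pin(ctx, pages, nr_pages, imu, last_hpage);
	if (ret) {
		unpin_user_pages(pages, pret);
		goto done;
	}

	/* describe the pinned range as one bio_vec per page */
	off = ubuf & ~PAGE_MASK;
	size = iov->iov_len;
	for (i = 0; i < nr_pages; i++) {
		size_t vec_len = min_t(size_t, size, PAGE_SIZE - off);

		imu->bvec[i].bv_page = pages[i];
		imu->bvec[i].bv_len = vec_len;
		imu->bvec[i].bv_offset = off;
		off = 0;
		size -= vec_len;
	}
	imu->ubuf = ubuf;
	imu->ubuf_end = ubuf + iov->iov_len;	/* some versions store a len field instead */
	imu->nr_bvecs = nr_pages;		/* line 9315 */
	*pimu = imu;
	ret = 0;
done:
	if (ret)
		kvfree(imu);
	kvfree(pages);
	kvfree(vmas);
	return ret;
}

FOLL_LONGTERM matters here: the pages back I/O for the lifetime of the registration, so GUP must first migrate them out of CMA/ZONE_MOVABLE before taking the long-term pin.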