Lines matching references to p
Each entry below gives the source line number, the matching line, and the enclosing function (noting whether p is an argument or a local).
1307 static struct io_ring_ctx *io_ring_ctx_alloc(struct io_uring_params *p) in io_ring_ctx_alloc() argument
1320 hash_bits = ilog2(p->cq_entries); in io_ring_ctx_alloc()
1341 ctx->flags = p->flags; in io_ring_ctx_alloc()
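Line 1320 derives a hash order from the CQ size with ilog2(). A minimal sketch of that sizing pattern; the downward adjustment and the clamp are assumptions about code adjacent to the listed line, and cancel_hash_order is a hypothetical name:

    #include <linux/log2.h>

    /* sketch: size a lookup table relative to the CQ ring */
    static int cancel_hash_order(unsigned int cq_entries)
    {
        int hash_bits = ilog2(cq_entries);  /* cq_entries is a power of two */

        hash_bits -= 5;                     /* keep the table smaller than the CQ */
        if (hash_bits <= 0)
            hash_bits = 1;                  /* never degenerate to zero buckets */
        return hash_bits;
    }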
4448 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers_prep() local
4459 memset(p, 0, sizeof(*p)); in io_remove_buffers_prep()
4460 p->nbufs = tmp; in io_remove_buffers_prep()
4461 p->bgid = READ_ONCE(sqe->buf_group); in io_remove_buffers_prep()
4494 struct io_provide_buf *p = &req->pbuf; in io_remove_buffers() local
4505 head = xa_load(&ctx->io_buffers, p->bgid); in io_remove_buffers()
4507 ret = __io_remove_buffers(ctx, head, p->bgid, p->nbufs); in io_remove_buffers()
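The fields read in io_remove_buffers_prep() map directly onto the SQE: the buffer count travels in sqe->fd and the group ID in sqe->buf_group. A hedged userspace sketch of filling a raw SQE accordingly (prep_remove_buffers is a hypothetical helper):

    #include <linux/io_uring.h>
    #include <string.h>

    static void prep_remove_buffers(struct io_uring_sqe *sqe,
                                    unsigned int nbufs, __u16 bgid)
    {
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode = IORING_OP_REMOVE_BUFFERS;
        sqe->fd = (int)nbufs;    /* number of buffers to remove (p->nbufs) */
        sqe->buf_group = bgid;   /* buffer group to remove from (p->bgid) */
    }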
4521 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers_prep() local
4530 p->nbufs = tmp; in io_provide_buffers_prep()
4531 p->addr = READ_ONCE(sqe->addr); in io_provide_buffers_prep()
4532 p->len = READ_ONCE(sqe->len); in io_provide_buffers_prep()
4534 if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs, in io_provide_buffers_prep()
4537 if (check_add_overflow((unsigned long)p->addr, size, &tmp_check)) in io_provide_buffers_prep()
4540 size = (unsigned long)p->len * p->nbufs; in io_provide_buffers_prep()
4541 if (!access_ok(u64_to_user_ptr(p->addr), size)) in io_provide_buffers_prep()
4544 p->bgid = READ_ONCE(sqe->buf_group); in io_provide_buffers_prep()
4548 p->bid = tmp; in io_provide_buffers_prep()
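Lines 4534-4541 guard the user-supplied buffer description: len * nbufs must not wrap, addr + size must not wrap, and only then is the range handed to access_ok(). Reconstructed as one sketch (the specific error codes are assumptions):

    #include <linux/overflow.h>
    #include <linux/uaccess.h>

    static int validate_buf_range(struct io_provide_buf *p)
    {
        unsigned long size, tmp_check;

        if (check_mul_overflow((unsigned long)p->len, (unsigned long)p->nbufs,
                               &size))
            return -EOVERFLOW;   /* total size wraps */
        if (check_add_overflow((unsigned long)p->addr, size, &tmp_check))
            return -EOVERFLOW;   /* end address wraps */
        if (!access_ok(u64_to_user_ptr(p->addr), size))
            return -EFAULT;      /* range not valid user memory */
        return 0;
    }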
4582 struct io_provide_buf *p = &req->pbuf; in io_provide_buffers() local
4592 list = head = xa_load(&ctx->io_buffers, p->bgid); in io_provide_buffers()
4594 ret = io_add_buffers(p, &head); in io_provide_buffers()
4596 ret = xa_insert(&ctx->io_buffers, p->bgid, head, in io_provide_buffers()
4599 __io_remove_buffers(ctx, head, p->bgid, -1U); in io_provide_buffers()
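Lines 4592-4599 show a load-or-insert pattern on the io_buffers XArray, but the listing elides the branch structure; a sketch of the likely control flow (the if conditions are assumptions from context):

    head = list = xa_load(&ctx->io_buffers, p->bgid);  /* existing group? */
    ret = io_add_buffers(p, &head);                    /* link the new buffers */
    if (ret >= 0 && !list) {                           /* group did not exist */
        ret = xa_insert(&ctx->io_buffers, p->bgid, head, GFP_KERNEL);
        if (ret < 0)                                   /* insert lost: unwind */
            __io_remove_buffers(ctx, head, p->bgid, -1U);
    }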
5861 struct poll_table_struct *p) in io_poll_queue_proc() argument
5863 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); in io_poll_queue_proc()
5928 struct poll_table_struct *p) in io_async_queue_proc() argument
5930 struct io_poll_table *pt = container_of(p, struct io_poll_table, pt); in io_async_queue_proc()
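Both queue procs recover the surrounding io_poll_table from the embedded poll_table_struct with container_of(). A self-contained userspace illustration of the idiom, using simplified stand-in types rather than the kernel's:

    #include <stddef.h>

    struct poll_table_struct { int dummy; };

    struct io_poll_table {
        struct poll_table_struct pt;   /* embedded member handed to callbacks */
        int nr_entries;
    };

    /* simplified container_of: step back from a member to its container */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct io_poll_table *pt_to_table(struct poll_table_struct *p)
    {
        return container_of(p, struct io_poll_table, pt);
    }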
8277 static struct io_sq_data *io_attach_sq_data(struct io_uring_params *p) in io_attach_sq_data() argument
8283 f = fdget(p->wq_fd); in io_attach_sq_data()
8307 static struct io_sq_data *io_get_sq_data(struct io_uring_params *p, in io_get_sq_data() argument
8313 if (p->flags & IORING_SETUP_ATTACH_WQ) { in io_get_sq_data()
8314 sqd = io_attach_sq_data(p); in io_get_sq_data()
8892 struct io_uring_params *p) in io_sq_offload_create() argument
8901 f = fdget(p->wq_fd); in io_sq_offload_create()
8915 sqd = io_get_sq_data(p, &attached); in io_sq_offload_create()
8923 ctx->sq_thread_idle = msecs_to_jiffies(p->sq_thread_idle); in io_sq_offload_create()
8939 if (p->flags & IORING_SETUP_SQ_AFF) { in io_sq_offload_create()
8940 int cpu = p->sq_thread_cpu; in io_sq_offload_create()
8963 } else if (p->flags & IORING_SETUP_SQ_AFF) { in io_sq_offload_create()
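The SQPOLL paths above consume several io_uring_params fields: wq_fd (fdget at 8283 and 8901), sq_thread_idle (8923), and sq_thread_cpu (8940, gated on IORING_SETUP_SQ_AFF). A hedged userspace sketch of both setup variants; the helper names and the 1000 ms idle value are arbitrary choices:

    #include <linux/io_uring.h>
    #include <string.h>

    /* request a dedicated SQ poll thread pinned to one CPU */
    static void setup_sqpoll(struct io_uring_params *p, __u32 cpu)
    {
        memset(p, 0, sizeof(*p));
        p->flags = IORING_SETUP_SQPOLL | IORING_SETUP_SQ_AFF;
        p->sq_thread_cpu = cpu;
        p->sq_thread_idle = 1000;   /* ms before the thread goes idle */
    }

    /* share the SQ thread of an existing ring instead */
    static void setup_attach(struct io_uring_params *p, int existing_ring_fd)
    {
        memset(p, 0, sizeof(*p));
        p->flags = IORING_SETUP_SQPOLL | IORING_SETUP_ATTACH_WQ;
        p->wq_fd = existing_ring_fd;
    }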
10449 struct io_uring_params *p) in io_allocate_scq_urings() argument
10455 ctx->sq_entries = p->sq_entries; in io_allocate_scq_urings()
10456 ctx->cq_entries = p->cq_entries; in io_allocate_scq_urings()
10458 size = rings_size(p->sq_entries, p->cq_entries, &sq_array_offset); in io_allocate_scq_urings()
10468 rings->sq_ring_mask = p->sq_entries - 1; in io_allocate_scq_urings()
10469 rings->cq_ring_mask = p->cq_entries - 1; in io_allocate_scq_urings()
10470 rings->sq_ring_entries = p->sq_entries; in io_allocate_scq_urings()
10471 rings->cq_ring_entries = p->cq_entries; in io_allocate_scq_urings()
10473 size = array_size(sizeof(struct io_uring_sqe), p->sq_entries); in io_allocate_scq_urings()
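Because both ring sizes are rounded to powers of two, the masks written at 10468-10469 are entries - 1, so producers and consumers wrap indices with a single AND instead of a modulo. For example:

    unsigned int mask = p->sq_entries - 1;   /* 128 entries -> mask 127 */
    unsigned int slot = tail & mask;         /* tail 130 -> slot 2 */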
10538 static int io_uring_create(unsigned entries, struct io_uring_params *p, in io_uring_create() argument
10548 if (!(p->flags & IORING_SETUP_CLAMP)) in io_uring_create()
10561 p->sq_entries = roundup_pow_of_two(entries); in io_uring_create()
10562 if (p->flags & IORING_SETUP_CQSIZE) { in io_uring_create()
10568 if (!p->cq_entries) in io_uring_create()
10570 if (p->cq_entries > IORING_MAX_CQ_ENTRIES) { in io_uring_create()
10571 if (!(p->flags & IORING_SETUP_CLAMP)) in io_uring_create()
10573 p->cq_entries = IORING_MAX_CQ_ENTRIES; in io_uring_create()
10575 p->cq_entries = roundup_pow_of_two(p->cq_entries); in io_uring_create()
10576 if (p->cq_entries < p->sq_entries) in io_uring_create()
10579 p->cq_entries = 2 * p->sq_entries; in io_uring_create()
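The listed lines 10548-10579 elide the branch structure of the sizing logic; reconstructed as a sketch from the visible lines (error codes assumed):

    p->sq_entries = roundup_pow_of_two(entries);           /* e.g. 100 -> 128 */
    if (p->flags & IORING_SETUP_CQSIZE) {
        if (!p->cq_entries)
            return -EINVAL;
        if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
            if (!(p->flags & IORING_SETUP_CLAMP))
                return -EINVAL;
            p->cq_entries = IORING_MAX_CQ_ENTRIES;         /* clamp if asked */
        }
        p->cq_entries = roundup_pow_of_two(p->cq_entries);
        if (p->cq_entries < p->sq_entries)
            return -EINVAL;                                /* CQ must cover SQ */
    } else {
        p->cq_entries = 2 * p->sq_entries;                 /* default 128 -> 256 */
    }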
10582 ctx = io_ring_ctx_alloc(p); in io_uring_create()
10598 ret = io_allocate_scq_urings(ctx, p); in io_uring_create()
10602 ret = io_sq_offload_create(ctx, p); in io_uring_create()
10611 memset(&p->sq_off, 0, sizeof(p->sq_off)); in io_uring_create()
10612 p->sq_off.head = offsetof(struct io_rings, sq.head); in io_uring_create()
10613 p->sq_off.tail = offsetof(struct io_rings, sq.tail); in io_uring_create()
10614 p->sq_off.ring_mask = offsetof(struct io_rings, sq_ring_mask); in io_uring_create()
10615 p->sq_off.ring_entries = offsetof(struct io_rings, sq_ring_entries); in io_uring_create()
10616 p->sq_off.flags = offsetof(struct io_rings, sq_flags); in io_uring_create()
10617 p->sq_off.dropped = offsetof(struct io_rings, sq_dropped); in io_uring_create()
10618 p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings; in io_uring_create()
10620 memset(&p->cq_off, 0, sizeof(p->cq_off)); in io_uring_create()
10621 p->cq_off.head = offsetof(struct io_rings, cq.head); in io_uring_create()
10622 p->cq_off.tail = offsetof(struct io_rings, cq.tail); in io_uring_create()
10623 p->cq_off.ring_mask = offsetof(struct io_rings, cq_ring_mask); in io_uring_create()
10624 p->cq_off.ring_entries = offsetof(struct io_rings, cq_ring_entries); in io_uring_create()
10625 p->cq_off.overflow = offsetof(struct io_rings, cq_overflow); in io_uring_create()
10626 p->cq_off.cqes = offsetof(struct io_rings, cqes); in io_uring_create()
10627 p->cq_off.flags = offsetof(struct io_rings, cq_flags); in io_uring_create()
10629 p->features = IORING_FEAT_SINGLE_MMAP | IORING_FEAT_NODROP | in io_uring_create()
10636 if (copy_to_user(params, p, sizeof(*p))) { in io_uring_create()
10658 trace_io_uring_create(ret, ctx, p->sq_entries, p->cq_entries, p->flags); in io_uring_create()
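The sq_off/cq_off blocks filled at 10611-10627 are what userspace adds to its mmap() base to locate each ring field; IORING_FEAT_SINGLE_MMAP (10629) lets one mapping serve both rings. A hedged sketch of the consumer side (map_sq_ring is a hypothetical helper; ring_fd is the fd returned by io_uring_setup):

    #include <linux/io_uring.h>
    #include <sys/mman.h>

    static void *map_sq_ring(int ring_fd, const struct io_uring_params *p)
    {
        size_t sq_sz = p->sq_off.array + p->sq_entries * sizeof(__u32);
        size_t cq_sz = p->cq_off.cqes +
                       p->cq_entries * sizeof(struct io_uring_cqe);

        if ((p->features & IORING_FEAT_SINGLE_MMAP) && cq_sz > sq_sz)
            sq_sz = cq_sz;          /* one mapping covers both rings */
        return mmap(NULL, sq_sz, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_POPULATE, ring_fd, IORING_OFF_SQ_RING);
    }

The SQ head then lives at (char *)base + p->sq_off.head, the tail at p->sq_off.tail, and likewise for every published offset.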
10672 struct io_uring_params p; in io_uring_setup() local
10675 if (copy_from_user(&p, params, sizeof(p))) in io_uring_setup()
10677 for (i = 0; i < ARRAY_SIZE(p.resv); i++) { in io_uring_setup()
10678 if (p.resv[i]) in io_uring_setup()
10682 if (p.flags & ~(IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL | in io_uring_setup()
10688 return io_uring_create(entries, &p, params); in io_uring_setup()
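io_uring_setup() has no glibc wrapper, so it is invoked via syscall(2); the params struct must be zeroed, since lines 10677-10678 reject any nonzero resv byte. A minimal sketch (ring_setup is a hypothetical name):

    #include <linux/io_uring.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int ring_setup(unsigned int entries, struct io_uring_params *p)
    {
        return (int)syscall(__NR_io_uring_setup, entries, p);
    }

    /* usage */
    struct io_uring_params p;
    memset(&p, 0, sizeof(p));           /* resv fields must be zero */
    int ring_fd = ring_setup(256, &p);  /* fd for the new ring, or -1 */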
10699 struct io_uring_probe *p; in io_probe() local
10703 size = struct_size(p, ops, nr_args); in io_probe()
10706 p = kzalloc(size, GFP_KERNEL); in io_probe()
10707 if (!p) in io_probe()
10711 if (copy_from_user(p, arg, size)) in io_probe()
10714 if (memchr_inv(p, 0, size)) in io_probe()
10717 p->last_op = IORING_OP_LAST - 1; in io_probe()
10722 p->ops[i].op = i; in io_probe()
10724 p->ops[i].flags = IO_URING_OP_SUPPORTED; in io_probe()
10726 p->ops_len = i; in io_probe()
10729 if (copy_to_user(arg, p, size)) in io_probe()
10732 kfree(p); in io_probe()
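io_probe() likewise insists on a zeroed input buffer (10714) before reporting supported opcodes back to the caller. A hedged userspace sketch of driving it through io_uring_register(2); probe_ops is a hypothetical helper and 256 a generous opcode count:

    #include <linux/io_uring.h>
    #include <stdlib.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int probe_ops(int ring_fd)
    {
        size_t len = sizeof(struct io_uring_probe) +
                     256 * sizeof(struct io_uring_probe_op);
        struct io_uring_probe *probe = calloc(1, len); /* zeroed, as required */
        int supported = 0, ret;

        if (!probe)
            return -1;
        ret = (int)syscall(__NR_io_uring_register, ring_fd,
                           IORING_REGISTER_PROBE, probe, 256);
        if (ret == 0)
            for (int i = 0; i < probe->ops_len; i++)
                supported += !!(probe->ops[i].flags & IO_URING_OP_SUPPORTED);
        free(probe);
        return ret == 0 ? supported : ret;  /* count of supported opcodes */
    }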