#ifndef IO_URING_TYPES_H
#define IO_URING_TYPES_H

#include <linux/blkdev.h>
#include <linux/hashtable.h>
#include <linux/task_work.h>
#include <linux/bitmap.h>
#include <linux/llist.h>
#include <linux/android_kabi.h>
#include <uapi/linux/io_uring.h>

enum {
	/*
	 * A hint not to wake right away, but to delay until enough task_work
	 * items are queued to match the number of CQEs the task is waiting
	 * for.
	 *
	 * Must not be used with requests generating more than one CQE.
	 * It's also ignored unless IORING_SETUP_DEFER_TASKRUN is set.
	 */
	IOU_F_TWQ_LAZY_WAKE			= 1,
};

enum io_uring_cmd_flags {
	IO_URING_F_COMPLETE_DEFER	= 1,
	IO_URING_F_UNLOCKED		= 2,
	/* the request is executed from poll and should not be freed */
	IO_URING_F_MULTISHOT		= 4,
	/* executed by io-wq */
	IO_URING_F_IOWQ			= 8,
	/* int's last bit; sign checks are usually faster than a bit test */
	IO_URING_F_NONBLOCK		= INT_MIN,

	/* ctx state flags, for URING_CMD */
	IO_URING_F_SQE128		= (1 << 8),
	IO_URING_F_CQE32		= (1 << 9),
	IO_URING_F_IOPOLL		= (1 << 10),

	/* set when uring wants to cancel a previously issued command */
	IO_URING_F_CANCEL		= (1 << 11),
	IO_URING_F_COMPAT		= (1 << 12),
	IO_URING_F_TASK_DEAD		= (1 << 13),
};
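
/*
 * Illustrative sketch (editorial, not from this header): because
 * IO_URING_F_NONBLOCK is INT_MIN, only bit 31 is set, so testing it can be
 * done with a plain sign check on a signed int:
 *
 *	if ((int) issue_flags < 0)	// i.e. issue_flags & IO_URING_F_NONBLOCK
 *		...
 */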

struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};

struct io_wq_work {
	struct io_wq_work_node list;
	atomic_t flags;
	/* place it here instead of io_kiocb as it fills padding and saves 4B */
	int cancel_seq;
};
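
/*
 * Illustrative sketch (editorial, not part of this header): walking the
 * singly linked work list using only the types above:
 *
 *	struct io_wq_work_node *node;
 *
 *	for (node = list->first; node; node = node->next) {
 *		struct io_wq_work *work =
 *			container_of(node, struct io_wq_work, list);
 *		...
 *	}
 */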

struct io_fixed_file {
	/* file * with additional FFS_* flags */
	unsigned long file_ptr;
};

struct io_file_table {
	struct io_fixed_file *files;
	unsigned long *bitmap;
	unsigned int alloc_hint;
};

struct io_hash_bucket {
	spinlock_t		lock;
	struct hlist_head	list;
} ____cacheline_aligned_in_smp;

struct io_hash_table {
	struct io_hash_bucket	*hbs;
	unsigned		hash_bits;
};
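
/*
 * Illustrative sketch (editorial; hash_long() comes from <linux/hash.h>):
 * a request is hashed into a bucket by its CQE user_data, roughly:
 *
 *	struct io_hash_bucket *hb =
 *		&table->hbs[hash_long(user_data, table->hash_bits)];
 *
 *	spin_lock(&hb->lock);
 *	hlist_add_head(&req->hash_node, &hb->list);
 *	spin_unlock(&hb->lock);
 */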

/*
 * Arbitrary limit, can be raised if need be
 */
#define IO_RINGFD_REG_MAX 16

struct io_uring_task {
	/* submission side */
	int				cached_refs;
	const struct io_ring_ctx 	*last;
	struct io_wq			*io_wq;
	struct file			*registered_rings[IO_RINGFD_REG_MAX];

	struct xarray			xa;
	struct wait_queue_head		wait;
	atomic_t			in_cancel;
	atomic_t			inflight_tracked;
	struct percpu_counter		inflight;

	struct { /* task_work */
		struct llist_head	task_list;
		struct callback_head	task_work;
	} ____cacheline_aligned_in_smp;
};

struct io_uring {
	u32 head;
	u32 tail;
};

/*
 * This data is shared with the application through the mmap at offsets
 * IORING_OFF_SQ_RING and IORING_OFF_CQ_RING.
 *
 * The offsets to the member fields are published through struct
 * io_sqring_offsets when calling io_uring_setup.
 */
struct io_rings {
	/*
	 * Head and tail offsets into the ring; the offsets need to be
	 * masked to get valid indices.
	 *
	 * The kernel controls the head of the sq ring and the tail of the
	 * cq ring, and the application controls the tail of the sq ring
	 * and the head of the cq ring.
	 */
	struct io_uring		sq, cq;
	/*
	 * Bitmasks to apply to head and tail offsets (constant, equals
	 * ring_entries - 1)
	 */
	u32			sq_ring_mask, cq_ring_mask;
	/* Ring sizes (constant, power of 2) */
	u32			sq_ring_entries, cq_ring_entries;
	/*
	 * Number of invalid entries dropped by the kernel due to an
	 * invalid index stored in the array.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to a cached value).
	 *
	 * After the application reads a new SQ head value, this counter
	 * includes all submissions that were dropped before reaching the
	 * new SQ head (and possibly more).
	 */
	u32			sq_dropped;
	/*
	 * Runtime SQ flags
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application.
	 *
	 * The application needs a full memory barrier before checking
	 * for IORING_SQ_NEED_WAKEUP after updating the sq tail.
	 */
	atomic_t		sq_flags;
	/*
	 * Runtime CQ flags
	 *
	 * Written by the application, shouldn't be modified by the
	 * kernel.
	 */
	u32			cq_flags;
	/*
	 * Number of completion events lost because the queue was full;
	 * this should be avoided by the application by making sure
	 * there are no more requests pending than there is space in
	 * the completion queue.
	 *
	 * Written by the kernel, shouldn't be modified by the
	 * application (i.e. get the number of "new events" by comparing
	 * to a cached value).
	 *
	 * As completion events come in out of order, this counter is not
	 * ordered with any other data.
	 */
	u32			cq_overflow;
	/*
	 * Ring buffer of completion events.
	 *
	 * The kernel writes completion events fresh every time they are
	 * produced, so the application is allowed to modify pending
	 * entries.
	 */
	struct io_uring_cqe	cqes[] ____cacheline_aligned_in_smp;
};
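
/*
 * Illustrative sketch (editorial; mirrors the io_uring(7) documentation
 * rather than code in this file): an application drains the CQ ring by
 * masking the head index and publishing the new head with release
 * semantics:
 *
 *	unsigned head = rings->cq.head;
 *
 *	while (head != smp_load_acquire(&rings->cq.tail)) {
 *		struct io_uring_cqe *cqe =
 *			&rings->cqes[head & rings->cq_ring_mask];
 *		... handle cqe ...
 *		head++;
 *	}
 *	smp_store_release(&rings->cq.head, head);
 */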

struct io_restriction {
	DECLARE_BITMAP(register_op, IORING_REGISTER_LAST);
	DECLARE_BITMAP(sqe_op, IORING_OP_LAST);
	u8 sqe_flags_allowed;
	u8 sqe_flags_required;
	bool registered;
};

struct io_submit_link {
	struct io_kiocb		*head;
	struct io_kiocb		*last;
};

struct io_submit_state {
	/* inline/task_work completion list, under ->uring_lock */
	struct io_wq_work_node	free_list;
	/* batch completion logic */
	struct io_wq_work_list	compl_reqs;
	struct io_submit_link	link;

	bool			plug_started;
	bool			need_plug;
	bool			cq_flush;
	unsigned short		submit_nr;
	struct blk_plug		plug;
};

struct io_alloc_cache {
	void			**entries;
	unsigned int		nr_cached;
	unsigned int		max_cached;
	size_t			elem_size;
};
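
/*
 * Editorial note: a small cache of fixed-size objects (entries holds at most
 * max_cached pointers, each pointing at elem_size bytes) used to recycle
 * frequently allocated per-request data instead of hitting the allocator;
 * see the apoll/netmsg/rw/uring caches in struct io_ring_ctx below.
 */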

struct io_ring_ctx {
	/* const or read-mostly hot data */
	struct {
		unsigned int		flags;
		unsigned int		drain_next: 1;
		unsigned int		restricted: 1;
		unsigned int		off_timeout_used: 1;
		unsigned int		drain_active: 1;
		unsigned int		has_evfd: 1;
		/* all CQEs should be posted only by the submitter task */
		unsigned int		task_complete: 1;
		unsigned int		lockless_cq: 1;
		unsigned int		syscall_iopoll: 1;
		unsigned int		poll_activated: 1;
		unsigned int		drain_disabled: 1;
		unsigned int		compat: 1;
		unsigned int		iowq_limits_set: 1;

		struct task_struct	*submitter_task;
		struct io_rings		*rings;
		struct percpu_ref	refs;

		clockid_t		clockid;
		enum tk_offsets		clock_offset;

		enum task_work_notify_mode	notify_method;
		unsigned			sq_thread_idle;
	} ____cacheline_aligned_in_smp;

	/* submission data */
	struct {
		struct mutex		uring_lock;

		/*
		 * Ring buffer of indices into array of io_uring_sqe, which is
		 * mmapped by the application using the IORING_OFF_SQES offset.
		 *
		 * This indirection could e.g. be used to assign fixed
		 * io_uring_sqe entries to operations and only submit them to
		 * the queue when needed.
		 *
		 * The kernel modifies neither the indices array nor the entries
		 * array.
		 */
		u32			*sq_array;
		struct io_uring_sqe	*sq_sqes;
		unsigned		cached_sq_head;
		unsigned		sq_entries;

		/*
		 * Fixed resources fast path, should be accessed only under
		 * uring_lock, and updated through io_uring_register(2)
		 */
		struct io_rsrc_node	*rsrc_node;
		atomic_t		cancel_seq;

		/*
		 * ->iopoll_list is protected by the ctx->uring_lock for
		 * io_uring instances that don't use IORING_SETUP_SQPOLL.
		 * For SQPOLL, only the single threaded io_sq_thread() will
		 * manipulate the list, hence no extra locking is needed there.
		 */
		bool			poll_multi_queue;
		struct io_wq_work_list	iopoll_list;

		struct io_file_table	file_table;
		struct io_mapped_ubuf	**user_bufs;
		unsigned		nr_user_files;
		unsigned		nr_user_bufs;

		struct io_submit_state	submit_state;

		struct xarray		io_bl_xa;

		struct io_hash_table	cancel_table_locked;
		struct io_alloc_cache	apoll_cache;
		struct io_alloc_cache	netmsg_cache;
		struct io_alloc_cache	rw_cache;
		struct io_alloc_cache	uring_cache;

		/*
		 * Any cancelable uring_cmd is added to this list in
		 * ->uring_cmd() by io_uring_cmd_insert_cancelable()
		 */
		struct hlist_head	cancelable_uring_cmd;
	} ____cacheline_aligned_in_smp;

	struct {
		/*
		 * We cache a range of free CQEs we can use; once exhausted,
		 * we go through a slower range setup, see __io_get_cqe().
		 */
		struct io_uring_cqe	*cqe_cached;
		struct io_uring_cqe	*cqe_sentinel;

		unsigned		cached_cq_tail;
		unsigned		cq_entries;
		struct io_ev_fd	__rcu	*io_ev_fd;
		unsigned		cq_extra;
	} ____cacheline_aligned_in_smp;

	/*
	 * task_work and async notification delivery cacheline. Expected to
	 * regularly bounce between CPUs.
	 */
	struct {
		struct llist_head	work_llist;
		unsigned long		check_cq;
		atomic_t		cq_wait_nr;
		atomic_t		cq_timeouts;
		struct wait_queue_head	cq_wait;
	} ____cacheline_aligned_in_smp;

	/* timeouts */
	struct {
		spinlock_t		timeout_lock;
		struct list_head	timeout_list;
		struct list_head	ltimeout_list;
		unsigned		cq_last_tm_flush;
	} ____cacheline_aligned_in_smp;

	spinlock_t		completion_lock;

	struct list_head	io_buffers_comp;
	struct list_head	cq_overflow_list;
	struct io_hash_table	cancel_table;

	struct hlist_head	waitid_list;

#ifdef CONFIG_FUTEX
	struct hlist_head	futex_list;
	struct io_alloc_cache	futex_cache;
#endif

	const struct cred	*sq_creds;	/* cred used for __io_sq_thread() */
	struct io_sq_data	*sq_data;	/* if using sq thread polling */

	struct wait_queue_head	sqo_sq_wait;
	struct list_head	sqd_list;

	unsigned int		file_alloc_start;
	unsigned int		file_alloc_end;

	struct list_head	io_buffers_cache;

	/* Keep this last, we don't need it for the fast path */
	struct wait_queue_head		poll_wq;
	struct io_restriction		restrictions;

	/* slow path rsrc auxiliary data, used by update/register */
	struct io_rsrc_data		*file_data;
	struct io_rsrc_data		*buf_data;

	/* protected by ->uring_lock */
	struct list_head		rsrc_ref_list;
	struct io_alloc_cache		rsrc_node_cache;
	struct wait_queue_head		rsrc_quiesce_wq;
	unsigned			rsrc_quiesce;

	u32			pers_next;
	struct xarray		personalities;

	/* hashed buffered write serialization */
	struct io_wq_hash		*hash_map;

	/* Only used for accounting purposes */
	struct user_struct		*user;
	struct mm_struct		*mm_account;

	/* ctx exit and cancelation */
	struct llist_head		fallback_llist;
	struct delayed_work		fallback_work;
	struct work_struct		exit_work;
	struct list_head		tctx_list;
	struct completion		ref_comp;

	/* io-wq management, e.g. thread count */
	u32				iowq_limits[2];

	struct callback_head		poll_wq_task_work;
	struct list_head		defer_list;

	struct io_alloc_cache		msg_cache;
	spinlock_t			msg_lock;

#ifdef CONFIG_NET_RX_BUSY_POLL
	struct list_head	napi_list;	/* track busy poll napi_id */
	spinlock_t		napi_lock;	/* napi_list lock */

	/* napi busy poll default timeout */
	ktime_t			napi_busy_poll_dt;
	bool			napi_prefer_busy_poll;
	bool			napi_enabled;

	DECLARE_HASHTABLE(napi_ht, 4);
#endif

	/* protected by ->completion_lock */
	unsigned			evfd_last_cq_tail;

	/*
	 * If IORING_SETUP_NO_MMAP is used, then the below holds
	 * the gup'ed pages for the two rings, and the sqes.
	 */
	unsigned short			n_ring_pages;
	unsigned short			n_sqe_pages;
	struct page			**ring_pages;
	struct page			**sqe_pages;
};

/*
 * Passed by pointer to task_work callbacks (see io_req_tw_func_t below);
 * it carries no state and serves only to mark functions that must be
 * invoked from task_work context.
 */
struct io_tw_state {
};

enum {
	REQ_F_FIXED_FILE_BIT	= IOSQE_FIXED_FILE_BIT,
	REQ_F_IO_DRAIN_BIT	= IOSQE_IO_DRAIN_BIT,
	REQ_F_LINK_BIT		= IOSQE_IO_LINK_BIT,
	REQ_F_HARDLINK_BIT	= IOSQE_IO_HARDLINK_BIT,
	REQ_F_FORCE_ASYNC_BIT	= IOSQE_ASYNC_BIT,
	REQ_F_BUFFER_SELECT_BIT	= IOSQE_BUFFER_SELECT_BIT,
	REQ_F_CQE_SKIP_BIT	= IOSQE_CQE_SKIP_SUCCESS_BIT,

	/* first byte is taken by user flags, shift it to not overlap */
	REQ_F_FAIL_BIT		= 8,
	REQ_F_INFLIGHT_BIT,
	REQ_F_CUR_POS_BIT,
	REQ_F_NOWAIT_BIT,
	REQ_F_LINK_TIMEOUT_BIT,
	REQ_F_NEED_CLEANUP_BIT,
	REQ_F_POLLED_BIT,
	REQ_F_BUFFER_SELECTED_BIT,
	REQ_F_BUFFER_RING_BIT,
	REQ_F_REISSUE_BIT,
	REQ_F_CREDS_BIT,
	REQ_F_REFCOUNT_BIT,
	REQ_F_ARM_LTIMEOUT_BIT,
	REQ_F_ASYNC_DATA_BIT,
	REQ_F_SKIP_LINK_CQES_BIT,
	REQ_F_SINGLE_POLL_BIT,
	REQ_F_DOUBLE_POLL_BIT,
	REQ_F_MULTISHOT_BIT,
	REQ_F_APOLL_MULTISHOT_BIT,
	REQ_F_CLEAR_POLLIN_BIT,
	REQ_F_HASH_LOCKED_BIT,
	/* keep async read/write and isreg together and in order */
	REQ_F_SUPPORT_NOWAIT_BIT,
	REQ_F_ISREG_BIT,
	REQ_F_POLL_NO_LAZY_BIT,
	REQ_F_CAN_POLL_BIT,
	REQ_F_BL_EMPTY_BIT,
	REQ_F_BL_NO_RECYCLE_BIT,
	REQ_F_BUFFERS_COMMIT_BIT,

	/* not a real bit, just to check we're not overflowing the space */
	__REQ_F_LAST_BIT,
};
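
/*
 * Illustrative sketch (editorial; io_uring.c performs a compile-time check
 * along these lines at init time) showing what __REQ_F_LAST_BIT guards:
 *
 *	BUILD_BUG_ON(__REQ_F_LAST_BIT > 8 * sizeof(io_req_flags_t));
 */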

typedef u64 __bitwise io_req_flags_t;
#define IO_REQ_FLAG(bitno)	((__force io_req_flags_t) BIT_ULL((bitno)))

enum {
	/* ctx owns file */
	REQ_F_FIXED_FILE	= IO_REQ_FLAG(REQ_F_FIXED_FILE_BIT),
	/* drain existing IO first */
	REQ_F_IO_DRAIN		= IO_REQ_FLAG(REQ_F_IO_DRAIN_BIT),
	/* linked sqes */
	REQ_F_LINK		= IO_REQ_FLAG(REQ_F_LINK_BIT),
	/* doesn't sever on completion < 0 */
	REQ_F_HARDLINK		= IO_REQ_FLAG(REQ_F_HARDLINK_BIT),
	/* IOSQE_ASYNC */
	REQ_F_FORCE_ASYNC	= IO_REQ_FLAG(REQ_F_FORCE_ASYNC_BIT),
	/* IOSQE_BUFFER_SELECT */
	REQ_F_BUFFER_SELECT	= IO_REQ_FLAG(REQ_F_BUFFER_SELECT_BIT),
	/* IOSQE_CQE_SKIP_SUCCESS */
	REQ_F_CQE_SKIP		= IO_REQ_FLAG(REQ_F_CQE_SKIP_BIT),

	/* fail rest of links */
	REQ_F_FAIL		= IO_REQ_FLAG(REQ_F_FAIL_BIT),
	/* on inflight list, should be cancelled and waited on exit reliably */
	REQ_F_INFLIGHT		= IO_REQ_FLAG(REQ_F_INFLIGHT_BIT),
	/* read/write uses file position */
	REQ_F_CUR_POS		= IO_REQ_FLAG(REQ_F_CUR_POS_BIT),
	/* must not punt to workers */
	REQ_F_NOWAIT		= IO_REQ_FLAG(REQ_F_NOWAIT_BIT),
	/* has or had linked timeout */
	REQ_F_LINK_TIMEOUT	= IO_REQ_FLAG(REQ_F_LINK_TIMEOUT_BIT),
	/* needs cleanup */
	REQ_F_NEED_CLEANUP	= IO_REQ_FLAG(REQ_F_NEED_CLEANUP_BIT),
	/* already went through poll handler */
	REQ_F_POLLED		= IO_REQ_FLAG(REQ_F_POLLED_BIT),
	/* buffer already selected */
	REQ_F_BUFFER_SELECTED	= IO_REQ_FLAG(REQ_F_BUFFER_SELECTED_BIT),
	/* buffer selected from ring, needs commit */
	REQ_F_BUFFER_RING	= IO_REQ_FLAG(REQ_F_BUFFER_RING_BIT),
	/* caller should reissue async */
	REQ_F_REISSUE		= IO_REQ_FLAG(REQ_F_REISSUE_BIT),
	/* supports async reads/writes */
	REQ_F_SUPPORT_NOWAIT	= IO_REQ_FLAG(REQ_F_SUPPORT_NOWAIT_BIT),
	/* regular file */
	REQ_F_ISREG		= IO_REQ_FLAG(REQ_F_ISREG_BIT),
	/* has creds assigned */
	REQ_F_CREDS		= IO_REQ_FLAG(REQ_F_CREDS_BIT),
	/* skip refcounting if not set */
	REQ_F_REFCOUNT		= IO_REQ_FLAG(REQ_F_REFCOUNT_BIT),
	/* there is a linked timeout that has to be armed */
	REQ_F_ARM_LTIMEOUT	= IO_REQ_FLAG(REQ_F_ARM_LTIMEOUT_BIT),
	/* ->async_data allocated */
	REQ_F_ASYNC_DATA	= IO_REQ_FLAG(REQ_F_ASYNC_DATA_BIT),
	/* don't post CQEs while failing linked requests */
	REQ_F_SKIP_LINK_CQES	= IO_REQ_FLAG(REQ_F_SKIP_LINK_CQES_BIT),
	/* single poll may be active */
	REQ_F_SINGLE_POLL	= IO_REQ_FLAG(REQ_F_SINGLE_POLL_BIT),
	/* double poll may be active */
	REQ_F_DOUBLE_POLL	= IO_REQ_FLAG(REQ_F_DOUBLE_POLL_BIT),
	/* request posts multiple completions, should be set at prep time */
	REQ_F_MULTISHOT		= IO_REQ_FLAG(REQ_F_MULTISHOT_BIT),
	/* fast poll multishot mode */
	REQ_F_APOLL_MULTISHOT	= IO_REQ_FLAG(REQ_F_APOLL_MULTISHOT_BIT),
	/* recvmsg special flag, clear EPOLLIN */
	REQ_F_CLEAR_POLLIN	= IO_REQ_FLAG(REQ_F_CLEAR_POLLIN_BIT),
	/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
	REQ_F_HASH_LOCKED	= IO_REQ_FLAG(REQ_F_HASH_LOCKED_BIT),
	/* don't use lazy poll wake for this request */
	REQ_F_POLL_NO_LAZY	= IO_REQ_FLAG(REQ_F_POLL_NO_LAZY_BIT),
	/* file is pollable */
	REQ_F_CAN_POLL		= IO_REQ_FLAG(REQ_F_CAN_POLL_BIT),
	/* buffer list was empty after selection of buffer */
	REQ_F_BL_EMPTY		= IO_REQ_FLAG(REQ_F_BL_EMPTY_BIT),
	/* don't recycle provided buffers for this request */
	REQ_F_BL_NO_RECYCLE	= IO_REQ_FLAG(REQ_F_BL_NO_RECYCLE_BIT),
	/* buffer ring head needs incrementing on put */
	REQ_F_BUFFERS_COMMIT	= IO_REQ_FLAG(REQ_F_BUFFERS_COMMIT_BIT),
};

typedef void (*io_req_tw_func_t)(struct io_kiocb *req, struct io_tw_state *ts);

struct io_task_work {
	struct llist_node		node;
	io_req_tw_func_t		func;
};

struct io_cqe {
	__u64	user_data;
	__s32	res;
	/* fd initially, then cflags for completion */
	union {
		__u32	flags;
		int	fd;
	};
};

/*
 * Each request type overlays its private data structure on top of this one.
 * They must not exceed this one in size.
 */
struct io_cmd_data {
	struct file		*file;
	/* each command gets 56 bytes of data */
	__u8			data[56];
};

static inline void io_kiocb_cmd_sz_check(size_t cmd_sz)
{
	BUILD_BUG_ON(cmd_sz > sizeof(struct io_cmd_data));
}
#define io_kiocb_to_cmd(req, cmd_type) ( \
	io_kiocb_cmd_sz_check(sizeof(cmd_type)) , \
	((cmd_type *)&(req)->cmd) \
)
#define cmd_to_io_kiocb(ptr)	((struct io_kiocb *) ptr)

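/*
 * Illustrative sketch (editorial; 'struct io_rw' is one of the per-opcode
 * command types defined elsewhere in io_uring): an opcode handler overlays
 * its private data on the request, and can convert back again:
 *
 *	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 *	struct io_kiocb *kiocb = cmd_to_io_kiocb(rw);
 */
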
struct io_kiocb {
	union {
		/*
		 * NOTE! Each of the io_kiocb union members has the file pointer
		 * as the first entry in their struct definition. So you can
		 * access the file pointer through any of the sub-structs,
		 * or directly as just 'file' in this struct.
		 */
		struct file		*file;
		struct io_cmd_data	cmd;
	};

	u8				opcode;
	/* polled IO has completed */
	u8				iopoll_completed;
	/*
	 * Can be either a fixed buffer index, or used with provided buffers.
	 * For the latter, before issue it points to the buffer group ID,
	 * and after selection it points to the buffer ID itself.
	 */
	u16				buf_index;

	unsigned			nr_tw;

	/* REQ_F_* flags */
	io_req_flags_t			flags;

	struct io_cqe			cqe;

	struct io_ring_ctx		*ctx;
	struct task_struct		*task;

	union {
		/* store used ubuf, so we can prevent reloading */
		struct io_mapped_ubuf	*imu;

		/* stores selected buf, valid IFF REQ_F_BUFFER_SELECTED is set */
		struct io_buffer	*kbuf;

		/*
		 * stores buffer ID for ring provided buffers, valid IFF
		 * REQ_F_BUFFER_RING is set.
		 */
		struct io_buffer_list	*buf_list;
	};

	union {
		/* used by request caches, completion batching and iopoll */
		struct io_wq_work_node	comp_list;
		/* cache ->apoll->events */
		__poll_t apoll_events;
	};

	struct io_rsrc_node		*rsrc_node;

	atomic_t			refs;
	bool				cancel_seq_set;
	struct io_task_work		io_task_work;
	union {
		/*
		 * for polled requests, i.e. IORING_OP_POLL_ADD and async armed
		 * poll
		 */
		struct hlist_node	hash_node;

		/* for private io_kiocb freeing */
		struct rcu_head		rcu_head;
	};
	/* internal polling, see IORING_FEAT_FAST_POLL */
	struct async_poll		*apoll;
	/* opcode allocated if it needs to store data for async defer */
	void				*async_data;
	atomic_t			poll_refs;
	/* linked requests, IFF REQ_F_HARDLINK or REQ_F_LINK are set */
	struct io_kiocb			*link;
	/* custom credentials, valid IFF REQ_F_CREDS is set */
	const struct cred		*creds;
	struct io_wq_work		work;

	struct {
		u64			extra1;
		u64			extra2;
	} big_cqe;
};

/*
 * Android KABI: records a type-layout string for struct io_kiocb so ABI
 * tooling can detect layout changes (see <linux/android_kabi.h>).
 */
ANDROID_KABI_TYPE_STRING("s#io_kiocb", "structure_type io_kiocb { member union_type { member pointer_type { s#file } file data_member_location(0) , member s#io_cmd_data cmd data_member_location(0) } byte_size(64) data_member_location(0) , member t#u8 opcode data_member_location(64) , member t#u8 iopoll_completed data_member_location(65) , member t#u16 buf_index data_member_location(66) , member base_type unsigned int byte_size(4) encoding(7) nr_tw data_member_location(68) , member t#io_req_flags_t flags data_member_location(72) , member s#io_cqe cqe data_member_location(80) , member pointer_type { s#io_ring_ctx } ctx data_member_location(96) , member pointer_type { s#task_struct } task data_member_location(104) , member union_type { member pointer_type { s#io_mapped_ubuf } imu data_member_location(0) , member pointer_type { s#io_buffer } kbuf data_member_location(0) , member pointer_type { s#io_buffer_list } buf_list data_member_location(0) } byte_size(8) data_member_location(112) , member union_type { member s#io_wq_work_node comp_list data_member_location(0) , member t#__poll_t apoll_events data_member_location(0) } byte_size(8) data_member_location(120) , member pointer_type { s#io_rsrc_node } rsrc_node data_member_location(128) , member t#atomic_t refs data_member_location(136) , member t#bool cancel_seq_set data_member_location(140) , member s#io_task_work io_task_work data_member_location(144) , member s#hlist_node hash_node data_member_location(160) , member pointer_type { s#async_poll } apoll data_member_location(176) , member pointer_type { base_type void } async_data data_member_location(184) , member t#atomic_t poll_refs data_member_location(192) , member pointer_type { s#io_kiocb } link data_member_location(200) , member pointer_type { const_type { s#cred } } creds data_member_location(208) , member s#io_wq_work work data_member_location(216) , member structure_type { member t#u64 extra1 data_member_location(0) , member t#u64 extra2 data_member_location(8) } byte_size(16) big_cqe data_member_location(232) } byte_size(248)");

struct io_overflow_cqe {
	struct list_head list;
	struct io_uring_cqe cqe;
};

#endif