1 #ifndef _LINUX_BLKDEV_H
2 #define _LINUX_BLKDEV_H
3 
4 #include <linux/sched.h>
5 
6 #ifdef CONFIG_BLOCK
7 
8 #include <linux/major.h>
9 #include <linux/genhd.h>
10 #include <linux/list.h>
11 #include <linux/llist.h>
12 #include <linux/timer.h>
13 #include <linux/workqueue.h>
14 #include <linux/pagemap.h>
15 #include <linux/backing-dev-defs.h>
16 #include <linux/wait.h>
17 #include <linux/mempool.h>
18 #include <linux/pfn.h>
19 #include <linux/bio.h>
20 #include <linux/stringify.h>
21 #include <linux/gfp.h>
22 #include <linux/bsg.h>
23 #include <linux/smp.h>
24 #include <linux/rcupdate.h>
25 #include <linux/percpu-refcount.h>
26 #include <linux/scatterlist.h>
27 
28 struct module;
29 struct scsi_ioctl_command;
30 
31 struct request_queue;
32 struct elevator_queue;
33 struct blk_trace;
34 struct request;
35 struct sg_io_hdr;
36 struct bsg_job;
37 struct blkcg_gq;
38 struct blk_flush_queue;
39 struct pr_ops;
40 
41 #define BLKDEV_MIN_RQ	4
42 #define BLKDEV_MAX_RQ	128	/* Default maximum */
43 
44 /*
45  * Maximum number of blkcg policies allowed to be registered concurrently.
46  * Defined here to simplify include dependency.
47  */
48 #define BLKCG_MAX_POLS		2
49 
50 typedef void (rq_end_io_fn)(struct request *, int);
51 
52 #define BLK_RL_SYNCFULL		(1U << 0)
53 #define BLK_RL_ASYNCFULL	(1U << 1)
54 
55 struct request_list {
56 	struct request_queue	*q;	/* the queue this rl belongs to */
57 #ifdef CONFIG_BLK_CGROUP
58 	struct blkcg_gq		*blkg;	/* blkg this request pool belongs to */
59 #endif
60 	/*
61 	 * count[], starved[], and wait[] are indexed by
62 	 * BLK_RW_SYNC/BLK_RW_ASYNC
63 	 */
64 	int			count[2];
65 	int			starved[2];
66 	mempool_t		*rq_pool;
67 	wait_queue_head_t	wait[2];
68 	unsigned int		flags;
69 };
70 
71 /*
72  * request command types
73  */
74 enum rq_cmd_type_bits {
75 	REQ_TYPE_FS		= 1,	/* fs request */
76 	REQ_TYPE_BLOCK_PC,		/* scsi command */
77 	REQ_TYPE_DRV_PRIV,		/* driver defined types from here */
78 };
79 
80 #define BLK_MAX_CDB	16
81 
82 /*
83  * Try to put the fields that are referenced together in the same cacheline.
84  *
85  * If you modify this structure, make sure to update blk_rq_init() and
86  * especially blk_mq_rq_ctx_init() to take care of the added fields.
87  */
88 struct request {
89 	struct list_head queuelist;
90 	union {
91 		struct call_single_data csd;
92 		u64 fifo_time;
93 	};
94 
95 	struct request_queue *q;
96 	struct blk_mq_ctx *mq_ctx;
97 
98 	int cpu;
99 	unsigned cmd_type;
100 	u64 cmd_flags;
101 	unsigned long atomic_flags;
102 
103 	/* the following two fields are internal, NEVER access directly */
104 	unsigned int __data_len;	/* total data len */
105 	sector_t __sector;		/* sector cursor */
106 
107 	struct bio *bio;
108 	struct bio *biotail;
109 
110 	/*
111 	 * The hash is used inside the scheduler, and killed once the
112 	 * request reaches the dispatch list. The ipi_list is only used
113 	 * to queue the request for softirq completion, which is long
114 	 * after the request has been unhashed (and even removed from
115 	 * the dispatch list).
116 	 */
117 	union {
118 		struct hlist_node hash;	/* merge hash */
119 		struct list_head ipi_list;
120 	};
121 
122 	/*
123 	 * The rb_node is only used inside the io scheduler, requests
124 	 * are pruned when moved to the dispatch queue. So let the
125 	 * completion_data share space with the rb_node.
126 	 */
127 	union {
128 		struct rb_node rb_node;	/* sort/lookup */
129 		void *completion_data;
130 	};
131 
132 	/*
133 	 * Three pointers are available for the IO schedulers, if they need
134 	 * more they have to dynamically allocate it.  Flush requests are
135 	 * never put on the IO scheduler. So let the flush fields share
136 	 * space with the elevator data.
137 	 */
138 	union {
139 		struct {
140 			struct io_cq		*icq;
141 			void			*priv[2];
142 		} elv;
143 
144 		struct {
145 			unsigned int		seq;
146 			struct list_head	list;
147 			rq_end_io_fn		*saved_end_io;
148 		} flush;
149 	};
150 
151 	struct gendisk *rq_disk;
152 	struct hd_struct *part;
153 	unsigned long start_time;
154 #ifdef CONFIG_BLK_CGROUP
155 	struct request_list *rl;		/* rl this rq is alloced from */
156 	unsigned long long start_time_ns;
157 	unsigned long long io_start_time_ns;    /* when passed to hardware */
158 #endif
159 	/* Number of scatter-gather DMA addr+len pairs after
160 	 * physical address coalescing is performed.
161 	 */
162 	unsigned short nr_phys_segments;
163 #if defined(CONFIG_BLK_DEV_INTEGRITY)
164 	unsigned short nr_integrity_segments;
165 #endif
166 
167 	unsigned short ioprio;
168 
169 	void *special;		/* opaque pointer available for LLD use */
170 
171 	int tag;
172 	int errors;
173 
174 	/*
175 	 * when request is used as a packet command carrier
176 	 */
177 	unsigned char __cmd[BLK_MAX_CDB];
178 	unsigned char *cmd;
179 	unsigned short cmd_len;
180 
181 	unsigned int extra_len;	/* length of alignment and padding */
182 	unsigned int sense_len;
183 	unsigned int resid_len;	/* residual count */
184 	void *sense;
185 
186 	unsigned long deadline;
187 	struct list_head timeout_list;
188 	unsigned int timeout;
189 	int retries;
190 
191 	/*
192 	 * completion callback.
193 	 */
194 	rq_end_io_fn *end_io;
195 	void *end_io_data;
196 
197 	/* for bidi */
198 	struct request *next_rq;
199 
200 	ktime_t			lat_hist_io_start;
201 	int			lat_hist_enabled;
202 };
203 
204 #define REQ_OP_SHIFT (8 * sizeof(u64) - REQ_OP_BITS)
205 #define req_op(req)  ((req)->cmd_flags >> REQ_OP_SHIFT)
206 
207 #define req_set_op(req, op) do {				\
208 	WARN_ON(op >= (1 << REQ_OP_BITS));			\
209 	(req)->cmd_flags &= ((1ULL << REQ_OP_SHIFT) - 1);	\
210 	(req)->cmd_flags |= ((u64) (op) << REQ_OP_SHIFT);	\
211 } while (0)
212 
213 #define req_set_op_attrs(req, op, flags) do {	\
214 	req_set_op(req, op);			\
215 	(req)->cmd_flags |= flags;		\
216 } while (0)
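/*
 * Illustrative sketch: the macros above pack the operation into the top
 * REQ_OP_BITS of ->cmd_flags.  A caller sets the op (plus any flags) once
 * and reads it back with req_op().  example_mark_sync_write() is a
 * hypothetical helper, not part of the block layer API; REQ_OP_WRITE and
 * REQ_SYNC come from <linux/blk_types.h> (pulled in via <linux/bio.h>).
 */
static inline bool example_mark_sync_write(struct request *req)
{
	req_set_op_attrs(req, REQ_OP_WRITE, REQ_SYNC);

	return req_op(req) == REQ_OP_WRITE && (req->cmd_flags & REQ_SYNC);
}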
217 
218 static inline unsigned short req_get_ioprio(struct request *req)
219 {
220 	return req->ioprio;
221 }
222 
223 #include <linux/elevator.h>
224 
225 struct blk_queue_ctx;
226 
227 typedef void (request_fn_proc) (struct request_queue *q);
228 typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio);
229 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
230 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
231 
232 struct bio_vec;
233 typedef void (softirq_done_fn)(struct request *);
234 typedef int (dma_drain_needed_fn)(struct request *);
235 typedef int (lld_busy_fn) (struct request_queue *q);
236 typedef int (bsg_job_fn) (struct bsg_job *);
237 
238 enum blk_eh_timer_return {
239 	BLK_EH_NOT_HANDLED,
240 	BLK_EH_HANDLED,
241 	BLK_EH_RESET_TIMER,
242 };
243 
244 typedef enum blk_eh_timer_return (rq_timed_out_fn)(struct request *);
245 
246 enum blk_queue_state {
247 	Queue_down,
248 	Queue_up,
249 };
250 
251 struct blk_queue_tag {
252 	struct request **tag_index;	/* map of busy tags */
253 	unsigned long *tag_map;		/* bit map of free/busy tags */
254 	int busy;			/* current depth */
255 	int max_depth;			/* what we will send to device */
256 	int real_max_depth;		/* what the array can hold */
257 	atomic_t refcnt;		/* map can be shared */
258 	int alloc_policy;		/* tag allocation policy */
259 	int next_tag;			/* next tag */
260 };
261 #define BLK_TAG_ALLOC_FIFO 0 /* allocate starting from 0 */
262 #define BLK_TAG_ALLOC_RR 1 /* allocate starting from last allocated tag */
263 
264 #define BLK_SCSI_MAX_CMDS	(256)
265 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
266 
267 struct queue_limits {
268 	unsigned long		bounce_pfn;
269 	unsigned long		seg_boundary_mask;
270 	unsigned long		virt_boundary_mask;
271 
272 	unsigned int		max_hw_sectors;
273 	unsigned int		max_dev_sectors;
274 	unsigned int		chunk_sectors;
275 	unsigned int		max_sectors;
276 	unsigned int		max_segment_size;
277 	unsigned int		physical_block_size;
278 	unsigned int		alignment_offset;
279 	unsigned int		io_min;
280 	unsigned int		io_opt;
281 	unsigned int		max_discard_sectors;
282 	unsigned int		max_hw_discard_sectors;
283 	unsigned int		max_write_same_sectors;
284 	unsigned int		discard_granularity;
285 	unsigned int		discard_alignment;
286 
287 	unsigned short		logical_block_size;
288 	unsigned short		max_segments;
289 	unsigned short		max_integrity_segments;
290 
291 	unsigned char		misaligned;
292 	unsigned char		discard_misaligned;
293 	unsigned char		cluster;
294 	unsigned char		discard_zeroes_data;
295 	unsigned char		raid_partial_stripes_expensive;
296 };
297 
298 struct request_queue {
299 	/*
300 	 * Together with queue_head for cacheline sharing
301 	 */
302 	struct list_head	queue_head;
303 	struct request		*last_merge;
304 	struct elevator_queue	*elevator;
305 	int			nr_rqs[2];	/* # allocated [a]sync rqs */
306 	int			nr_rqs_elvpriv;	/* # allocated rqs w/ elvpriv */
307 
308 	/*
309 	 * If blkcg is not used, @q->root_rl serves all requests.  If blkcg
310 	 * is used, root blkg allocates from @q->root_rl and all other
311 	 * blkgs from their own blkg->rl.  Which one to use should be
312 	 * determined using bio_request_list().
313 	 */
314 	struct request_list	root_rl;
315 
316 	request_fn_proc		*request_fn;
317 	make_request_fn		*make_request_fn;
318 	prep_rq_fn		*prep_rq_fn;
319 	unprep_rq_fn		*unprep_rq_fn;
320 	softirq_done_fn		*softirq_done_fn;
321 	rq_timed_out_fn		*rq_timed_out_fn;
322 	dma_drain_needed_fn	*dma_drain_needed;
323 	lld_busy_fn		*lld_busy_fn;
324 
325 	struct blk_mq_ops	*mq_ops;
326 
327 	unsigned int		*mq_map;
328 
329 	/* sw queues */
330 	struct blk_mq_ctx __percpu	*queue_ctx;
331 	unsigned int		nr_queues;
332 
333 	/* hw dispatch queues */
334 	struct blk_mq_hw_ctx	**queue_hw_ctx;
335 	unsigned int		nr_hw_queues;
336 
337 	/*
338 	 * Dispatch queue sorting
339 	 */
340 	sector_t		end_sector;
341 	struct request		*boundary_rq;
342 
343 	/*
344 	 * Delayed queue handling
345 	 */
346 	struct delayed_work	delay_work;
347 
348 	struct backing_dev_info	backing_dev_info;
349 
350 	/*
351 	 * The queue owner gets to use this for whatever they like.
352 	 * ll_rw_blk doesn't touch it.
353 	 */
354 	void			*queuedata;
355 
356 	/*
357 	 * various queue flags, see QUEUE_* below
358 	 */
359 	unsigned long		queue_flags;
360 
361 	/*
362 	 * ida allocated id for this queue.  Used to index queues from
363 	 * ioctx.
364 	 */
365 	int			id;
366 
367 	/*
368 	 * queue needs bounce pages for pages above this limit
369 	 */
370 	gfp_t			bounce_gfp;
371 
372 	/*
373 	 * protects queue structures from reentrancy. ->__queue_lock should
374 	 * _never_ be used directly; it is queue private; always use
375 	 * ->queue_lock.
376 	 */
377 	spinlock_t		__queue_lock;
378 	spinlock_t		*queue_lock;
379 
380 	/*
381 	 * queue kobject
382 	 */
383 	struct kobject kobj;
384 
385 	/*
386 	 * mq queue kobject
387 	 */
388 	struct kobject mq_kobj;
389 
390 #ifdef  CONFIG_BLK_DEV_INTEGRITY
391 	struct blk_integrity integrity;
392 #endif	/* CONFIG_BLK_DEV_INTEGRITY */
393 
394 #ifdef CONFIG_PM
395 	struct device		*dev;
396 	int			rpm_status;
397 	unsigned int		nr_pending;
398 #endif
399 
400 	/*
401 	 * queue settings
402 	 */
403 	unsigned long		nr_requests;	/* Max # of requests */
404 	unsigned int		nr_congestion_on;
405 	unsigned int		nr_congestion_off;
406 	unsigned int		nr_batching;
407 
408 	unsigned int		dma_drain_size;
409 	void			*dma_drain_buffer;
410 	unsigned int		dma_pad_mask;
411 	unsigned int		dma_alignment;
412 
413 	struct blk_queue_tag	*queue_tags;
414 	struct list_head	tag_busy_list;
415 
416 	unsigned int		nr_sorted;
417 	unsigned int		in_flight[2];
418 	/*
419 	 * Number of active block driver functions for which blk_drain_queue()
420 	 * must wait. Must be incremented around functions that unlock the
421 	 * queue_lock internally, e.g. scsi_request_fn().
422 	 */
423 	unsigned int		request_fn_active;
424 
425 	unsigned int		rq_timeout;
426 	struct timer_list	timeout;
427 	struct work_struct	timeout_work;
428 	struct list_head	timeout_list;
429 
430 	struct list_head	icq_list;
431 #ifdef CONFIG_BLK_CGROUP
432 	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
433 	struct blkcg_gq		*root_blkg;
434 	struct list_head	blkg_list;
435 #endif
436 
437 	struct queue_limits	limits;
438 
439 	/*
440 	 * sg stuff
441 	 */
442 	unsigned int		sg_timeout;
443 	unsigned int		sg_reserved_size;
444 	int			node;
445 #ifdef CONFIG_BLK_DEV_IO_TRACE
446 	struct blk_trace	*blk_trace;
447 #endif
448 	/*
449 	 * for flush operations
450 	 */
451 	struct blk_flush_queue	*fq;
452 
453 	struct list_head	requeue_list;
454 	spinlock_t		requeue_lock;
455 	struct delayed_work	requeue_work;
456 
457 	struct mutex		sysfs_lock;
458 
459 	int			bypass_depth;
460 	atomic_t		mq_freeze_depth;
461 
462 #if defined(CONFIG_BLK_DEV_BSG)
463 	bsg_job_fn		*bsg_job_fn;
464 	int			bsg_job_size;
465 	struct bsg_class_device bsg_dev;
466 #endif
467 
468 #ifdef CONFIG_BLK_DEV_THROTTLING
469 	/* Throttle data */
470 	struct throtl_data *td;
471 #endif
472 	struct rcu_head		rcu_head;
473 	wait_queue_head_t	mq_freeze_wq;
474 	struct percpu_ref	q_usage_counter;
475 	struct list_head	all_q_node;
476 
477 	struct blk_mq_tag_set	*tag_set;
478 	struct list_head	tag_set_list;
479 	struct bio_set		*bio_split;
480 
481 	bool			mq_sysfs_init_done;
482 };
483 
484 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
485 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
486 #define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
487 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
488 #define QUEUE_FLAG_DYING	5	/* queue being torn down */
489 #define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
490 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
491 #define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
492 #define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
493 #define QUEUE_FLAG_FAIL_IO     10	/* fake timeout */
494 #define QUEUE_FLAG_STACKABLE   11	/* supports request stacking */
495 #define QUEUE_FLAG_NONROT      12	/* non-rotational device (SSD) */
496 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
497 #define QUEUE_FLAG_IO_STAT     13	/* do IO stats */
498 #define QUEUE_FLAG_DISCARD     14	/* supports DISCARD */
499 #define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
500 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
501 #define QUEUE_FLAG_SECERASE    17	/* supports secure erase */
502 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
503 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
504 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
505 #define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
506 #define QUEUE_FLAG_POLL	       22	/* IO polling enabled if set */
507 #define QUEUE_FLAG_WC	       23	/* Write back caching */
508 #define QUEUE_FLAG_FUA	       24	/* device supports FUA writes */
509 #define QUEUE_FLAG_FLUSH_NQ    25	/* flush not queueable */
510 #define QUEUE_FLAG_DAX         26	/* device supports DAX */
511 
512 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
513 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
514 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
515 				 (1 << QUEUE_FLAG_ADD_RANDOM))
516 
517 #define QUEUE_FLAG_MQ_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
518 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
519 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
520 				 (1 << QUEUE_FLAG_POLL))
521 
522 static inline void queue_lockdep_assert_held(struct request_queue *q)
523 {
524 	if (q->queue_lock)
525 		lockdep_assert_held(q->queue_lock);
526 }
527 
528 static inline void queue_flag_set_unlocked(unsigned int flag,
529 					   struct request_queue *q)
530 {
531 	__set_bit(flag, &q->queue_flags);
532 }
533 
534 static inline int queue_flag_test_and_clear(unsigned int flag,
535 					    struct request_queue *q)
536 {
537 	queue_lockdep_assert_held(q);
538 
539 	if (test_bit(flag, &q->queue_flags)) {
540 		__clear_bit(flag, &q->queue_flags);
541 		return 1;
542 	}
543 
544 	return 0;
545 }
546 
547 static inline int queue_flag_test_and_set(unsigned int flag,
548 					  struct request_queue *q)
549 {
550 	queue_lockdep_assert_held(q);
551 
552 	if (!test_bit(flag, &q->queue_flags)) {
553 		__set_bit(flag, &q->queue_flags);
554 		return 0;
555 	}
556 
557 	return 1;
558 }
559 
560 static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
561 {
562 	queue_lockdep_assert_held(q);
563 	__set_bit(flag, &q->queue_flags);
564 }
565 
566 static inline void queue_flag_clear_unlocked(unsigned int flag,
567 					     struct request_queue *q)
568 {
569 	__clear_bit(flag, &q->queue_flags);
570 }
571 
572 static inline int queue_in_flight(struct request_queue *q)
573 {
574 	return q->in_flight[0] + q->in_flight[1];
575 }
576 
577 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
578 {
579 	queue_lockdep_assert_held(q);
580 	__clear_bit(flag, &q->queue_flags);
581 }
582 
583 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
584 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
585 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
586 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
587 #define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
588 #define blk_queue_init_done(q)	test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
589 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
590 #define blk_queue_noxmerges(q)	\
591 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
592 #define blk_queue_nonrot(q)	test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
593 #define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
594 #define blk_queue_add_random(q)	test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
595 #define blk_queue_stackable(q)	\
596 	test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
597 #define blk_queue_discard(q)	test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
598 #define blk_queue_secure_erase(q) \
599 	(test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
600 #define blk_queue_dax(q)	test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
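/*
 * Illustrative sketch: queue flags are normally flipped under ->queue_lock
 * with the helpers above and then tested locklessly via the blk_queue_*()
 * macros.  example_mark_nonrot() is hypothetical; drivers often use
 * queue_flag_set_unlocked() at init time instead.
 */
static inline void example_mark_nonrot(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_NONROT, q);
	spin_unlock_irq(q->queue_lock);
}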
601 
602 #define blk_noretry_request(rq) \
603 	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
604 			     REQ_FAILFAST_DRIVER))
605 
606 #define blk_account_rq(rq) \
607 	(((rq)->cmd_flags & REQ_STARTED) && \
608 	 ((rq)->cmd_type == REQ_TYPE_FS))
609 
610 #define blk_rq_cpu_valid(rq)	((rq)->cpu != -1)
611 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
612 /* rq->queuelist of dequeued request must be list_empty() */
613 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))
614 
615 #define list_entry_rq(ptr)	list_entry((ptr), struct request, queuelist)
616 
617 #define rq_data_dir(rq)		(op_is_write(req_op(rq)) ? WRITE : READ)
618 
619 /*
620  * A driver can handle struct request if it either has an old-style
621  * request_fn defined or is blk-mq based.
622  */
623 static inline bool queue_is_rq_based(struct request_queue *q)
624 {
625 	return q->request_fn || q->mq_ops;
626 }
627 
628 static inline unsigned int blk_queue_cluster(struct request_queue *q)
629 {
630 	return q->limits.cluster;
631 }
632 
633 /*
634  * We regard a request as sync if it is either a read or a sync write.
635  */
636 static inline bool rw_is_sync(int op, unsigned int rw_flags)
637 {
638 	return op == REQ_OP_READ || (rw_flags & REQ_SYNC);
639 }
640 
641 static inline bool rq_is_sync(struct request *rq)
642 {
643 	return rw_is_sync(req_op(rq), rq->cmd_flags);
644 }
645 
646 static inline bool blk_rl_full(struct request_list *rl, bool sync)
647 {
648 	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
649 
650 	return rl->flags & flag;
651 }
652 
653 static inline void blk_set_rl_full(struct request_list *rl, bool sync)
654 {
655 	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
656 
657 	rl->flags |= flag;
658 }
659 
660 static inline void blk_clear_rl_full(struct request_list *rl, bool sync)
661 {
662 	unsigned int flag = sync ? BLK_RL_SYNCFULL : BLK_RL_ASYNCFULL;
663 
664 	rl->flags &= ~flag;
665 }
666 
667 static inline bool rq_mergeable(struct request *rq)
668 {
669 	if (rq->cmd_type != REQ_TYPE_FS)
670 		return false;
671 
672 	if (req_op(rq) == REQ_OP_FLUSH)
673 		return false;
674 
675 	if (rq->cmd_flags & REQ_NOMERGE_FLAGS)
676 		return false;
677 
678 	return true;
679 }
680 
681 static inline bool blk_write_same_mergeable(struct bio *a, struct bio *b)
682 {
683 	if (bio_data(a) == bio_data(b))
684 		return true;
685 
686 	return false;
687 }
688 
689 /*
690  * q->prep_rq_fn return values
691  */
692 enum {
693 	BLKPREP_OK,		/* serve it */
694 	BLKPREP_KILL,		/* fatal error, kill, return -EIO */
695 	BLKPREP_DEFER,		/* leave on queue */
696 	BLKPREP_INVALID,	/* invalid command, kill, return -EREMOTEIO */
697 };
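/*
 * Illustrative sketch: a minimal prep_rq_fn (wired up via
 * blk_queue_prep_rq()) using the return codes above.  example_prep_rq()
 * is hypothetical; a real driver would build its per-request command here.
 */
static inline int example_prep_rq(struct request_queue *q, struct request *rq)
{
	if (blk_queue_dying(q))
		return BLKPREP_KILL;

	if (rq->cmd_type != REQ_TYPE_FS)
		return BLKPREP_INVALID;

	return BLKPREP_OK;
}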
698 
699 extern unsigned long blk_max_low_pfn, blk_max_pfn;
700 
701 /*
702  * standard bounce addresses:
703  *
704  * BLK_BOUNCE_HIGH	: bounce all highmem pages
705  * BLK_BOUNCE_ANY	: don't bounce anything
706  * BLK_BOUNCE_ISA	: bounce pages above ISA DMA boundary
707  */
708 
709 #if BITS_PER_LONG == 32
710 #define BLK_BOUNCE_HIGH		((u64)blk_max_low_pfn << PAGE_SHIFT)
711 #else
712 #define BLK_BOUNCE_HIGH		-1ULL
713 #endif
714 #define BLK_BOUNCE_ANY		(-1ULL)
715 #define BLK_BOUNCE_ISA		(DMA_BIT_MASK(24))
716 
717 /*
718  * default timeout for SG_IO if none specified
719  */
720 #define BLK_DEFAULT_SG_TIMEOUT	(60 * HZ)
721 #define BLK_MIN_SG_TIMEOUT	(7 * HZ)
722 
723 #ifdef CONFIG_BOUNCE
724 extern int init_emergency_isa_pool(void);
725 extern void blk_queue_bounce(struct request_queue *q, struct bio **bio);
726 #else
727 static inline int init_emergency_isa_pool(void)
728 {
729 	return 0;
730 }
731 static inline void blk_queue_bounce(struct request_queue *q, struct bio **bio)
732 {
733 }
734 #endif /* CONFIG_BOUNCE */
735 
736 struct rq_map_data {
737 	struct page **pages;
738 	int page_order;
739 	int nr_entries;
740 	unsigned long offset;
741 	int null_mapped;
742 	int from_user;
743 };
744 
745 struct req_iterator {
746 	struct bvec_iter iter;
747 	struct bio *bio;
748 };
749 
750 /* This should not be used directly - use rq_for_each_segment */
751 #define for_each_bio(_bio)		\
752 	for (; _bio; _bio = _bio->bi_next)
753 #define __rq_for_each_bio(_bio, rq)	\
754 	if ((rq->bio))			\
755 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
756 
757 #define rq_for_each_segment(bvl, _rq, _iter)			\
758 	__rq_for_each_bio(_iter.bio, _rq)			\
759 		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
760 
761 #define rq_iter_last(bvec, _iter)				\
762 		(_iter.bio->bi_next == NULL &&			\
763 		 bio_iter_last(bvec, _iter.iter))
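/*
 * Illustrative sketch: walking every bio_vec of a request with
 * rq_for_each_segment(), e.g. to total up the payload bytes.
 * example_rq_payload_bytes() is hypothetical.
 */
static inline unsigned int example_rq_payload_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;

	return bytes;
}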
764 
765 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
766 # error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
767 #endif
768 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
769 extern void rq_flush_dcache_pages(struct request *rq);
770 #else
771 static inline void rq_flush_dcache_pages(struct request *rq)
772 {
773 }
774 #endif
775 
776 #ifdef CONFIG_PRINTK
777 #define vfs_msg(sb, level, fmt, ...)				\
778 	__vfs_msg(sb, level, fmt, ##__VA_ARGS__)
779 #else
780 #define vfs_msg(sb, level, fmt, ...)				\
781 do {								\
782 	no_printk(fmt, ##__VA_ARGS__);				\
783 	__vfs_msg(sb, "", " ");					\
784 } while (0)
785 #endif
786 
787 extern int blk_register_queue(struct gendisk *disk);
788 extern void blk_unregister_queue(struct gendisk *disk);
789 extern blk_qc_t generic_make_request(struct bio *bio);
790 extern void blk_rq_init(struct request_queue *q, struct request *rq);
791 extern void blk_put_request(struct request *);
792 extern void __blk_put_request(struct request_queue *, struct request *);
793 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
794 extern void blk_rq_set_block_pc(struct request *);
795 extern void blk_requeue_request(struct request_queue *, struct request *);
796 extern void blk_add_request_payload(struct request *rq, struct page *page,
797 		int offset, unsigned int len);
798 extern int blk_lld_busy(struct request_queue *q);
799 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
800 			     struct bio_set *bs, gfp_t gfp_mask,
801 			     int (*bio_ctr)(struct bio *, struct bio *, void *),
802 			     void *data);
803 extern void blk_rq_unprep_clone(struct request *rq);
804 extern int blk_insert_cloned_request(struct request_queue *q,
805 				     struct request *rq);
806 extern int blk_rq_append_bio(struct request *rq, struct bio *bio);
807 extern void blk_delay_queue(struct request_queue *, unsigned long);
808 extern void blk_queue_split(struct request_queue *, struct bio **,
809 			    struct bio_set *);
810 extern void blk_recount_segments(struct request_queue *, struct bio *);
811 extern int scsi_verify_blk_ioctl(struct block_device *, unsigned int);
812 extern int scsi_cmd_blk_ioctl(struct block_device *, fmode_t,
813 			      unsigned int, void __user *);
814 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
815 			  unsigned int, void __user *);
816 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
817 			 struct scsi_ioctl_command __user *);
818 
819 extern int blk_queue_enter(struct request_queue *q, bool nowait);
820 extern void blk_queue_exit(struct request_queue *q);
821 extern void blk_start_queue(struct request_queue *q);
822 extern void blk_start_queue_async(struct request_queue *q);
823 extern void blk_stop_queue(struct request_queue *q);
824 extern void blk_sync_queue(struct request_queue *q);
825 extern void __blk_stop_queue(struct request_queue *q);
826 extern void __blk_run_queue(struct request_queue *q);
827 extern void __blk_run_queue_uncond(struct request_queue *q);
828 extern void blk_run_queue(struct request_queue *);
829 extern void blk_run_queue_async(struct request_queue *q);
830 extern int blk_rq_map_user(struct request_queue *, struct request *,
831 			   struct rq_map_data *, void __user *, unsigned long,
832 			   gfp_t);
833 extern int blk_rq_unmap_user(struct bio *);
834 extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, unsigned int, gfp_t);
835 extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
836 			       struct rq_map_data *, const struct iov_iter *,
837 			       gfp_t);
838 extern int blk_execute_rq(struct request_queue *, struct gendisk *,
839 			  struct request *, int);
840 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
841 				  struct request *, int, rq_end_io_fn *);
842 
843 bool blk_poll(struct request_queue *q, blk_qc_t cookie);
844 
845 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
846 {
847 	return bdev->bd_disk->queue;	/* this is never NULL */
848 }
849 
850 /*
851  * blk_rq_pos()			: the current sector
852  * blk_rq_bytes()		: bytes left in the entire request
853  * blk_rq_cur_bytes()		: bytes left in the current segment
854  * blk_rq_err_bytes()		: bytes left till the next error boundary
855  * blk_rq_sectors()		: sectors left in the entire request
856  * blk_rq_cur_sectors()		: sectors left in the current segment
857  */
858 static inline sector_t blk_rq_pos(const struct request *rq)
859 {
860 	return rq->__sector;
861 }
862 
863 static inline unsigned int blk_rq_bytes(const struct request *rq)
864 {
865 	return rq->__data_len;
866 }
867 
868 static inline int blk_rq_cur_bytes(const struct request *rq)
869 {
870 	return rq->bio ? bio_cur_bytes(rq->bio) : 0;
871 }
872 
873 extern unsigned int blk_rq_err_bytes(const struct request *rq);
874 
875 static inline unsigned int blk_rq_sectors(const struct request *rq)
876 {
877 	return blk_rq_bytes(rq) >> 9;
878 }
879 
880 static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
881 {
882 	return blk_rq_cur_bytes(rq) >> 9;
883 }
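/*
 * Illustrative sketch: the accessors above compose naturally, e.g. to find
 * the first sector past the end of a request.  example_blk_rq_end_sector()
 * is hypothetical.
 */
static inline sector_t example_blk_rq_end_sector(const struct request *rq)
{
	return blk_rq_pos(rq) + blk_rq_sectors(rq);
}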
884 
885 static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
886 						     int op)
887 {
888 	if (unlikely(op == REQ_OP_DISCARD || op == REQ_OP_SECURE_ERASE))
889 		return min(q->limits.max_discard_sectors, UINT_MAX >> 9);
890 
891 	if (unlikely(op == REQ_OP_WRITE_SAME))
892 		return q->limits.max_write_same_sectors;
893 
894 	return q->limits.max_sectors;
895 }
896 
897 /*
898  * Return maximum size of a request at given offset. Only valid for
899  * file system requests.
900  */
901 static inline unsigned int blk_max_size_offset(struct request_queue *q,
902 					       sector_t offset)
903 {
904 	if (!q->limits.chunk_sectors)
905 		return q->limits.max_sectors;
906 
907 	return q->limits.chunk_sectors -
908 			(offset & (q->limits.chunk_sectors - 1));
909 }
910 
911 static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
912 						  sector_t offset)
913 {
914 	struct request_queue *q = rq->q;
915 
916 	if (unlikely(rq->cmd_type != REQ_TYPE_FS))
917 		return q->limits.max_hw_sectors;
918 
919 	if (!q->limits.chunk_sectors ||
920 	    req_op(rq) == REQ_OP_DISCARD ||
921 	    req_op(rq) == REQ_OP_SECURE_ERASE)
922 		return blk_queue_get_max_sectors(q, req_op(rq));
923 
924 	return min(blk_max_size_offset(q, offset),
925 			blk_queue_get_max_sectors(q, req_op(rq)));
926 }
927 
928 static inline unsigned int blk_rq_count_bios(struct request *rq)
929 {
930 	unsigned int nr_bios = 0;
931 	struct bio *bio;
932 
933 	__rq_for_each_bio(bio, rq)
934 		nr_bios++;
935 
936 	return nr_bios;
937 }
938 
939 /*
940  * Request issue related functions.
941  */
942 extern struct request *blk_peek_request(struct request_queue *q);
943 extern void blk_start_request(struct request *rq);
944 extern struct request *blk_fetch_request(struct request_queue *q);
945 
946 /*
947  * Request completion related functions.
948  *
949  * blk_update_request() completes given number of bytes and updates
950  * the request without completing it.
951  *
952  * blk_end_request() and friends.  __blk_end_request() must be called
953  * with the request queue spinlock acquired.
954  *
955  * Several drivers define their own end_request and call
956  * blk_end_request() for parts of the original function.
957  * This prevents code duplication in drivers.
958  */
959 extern bool blk_update_request(struct request *rq, int error,
960 			       unsigned int nr_bytes);
961 extern void blk_finish_request(struct request *rq, int error);
962 extern bool blk_end_request(struct request *rq, int error,
963 			    unsigned int nr_bytes);
964 extern void blk_end_request_all(struct request *rq, int error);
965 extern bool blk_end_request_cur(struct request *rq, int error);
966 extern bool blk_end_request_err(struct request *rq, int error);
967 extern bool __blk_end_request(struct request *rq, int error,
968 			      unsigned int nr_bytes);
969 extern void __blk_end_request_all(struct request *rq, int error);
970 extern bool __blk_end_request_cur(struct request *rq, int error);
971 extern bool __blk_end_request_err(struct request *rq, int error);
972 
973 extern void blk_complete_request(struct request *);
974 extern void __blk_complete_request(struct request *);
975 extern void blk_abort_request(struct request *);
976 extern void blk_unprep_request(struct request *);
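/*
 * Illustrative sketch: the classic shape of a single-queue driver's
 * request_fn, pulling requests off the queue with blk_fetch_request() and
 * completing them with the helpers above (->request_fn is invoked with
 * queue_lock held, hence the __ variant).  example_request_fn() is
 * hypothetical and the actual hardware submission is elided.
 */
static inline void example_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		if (rq->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_all(rq, -EIO);
			continue;
		}
		/* hand rq to the hardware here; complete it later */
	}
}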
977 
978 /*
979  * Access functions for manipulating queue properties; an illustrative sketch follows the declarations below.
980  */
981 extern struct request_queue *blk_init_queue_node(request_fn_proc *rfn,
982 					spinlock_t *lock, int node_id);
983 extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *);
984 extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
985 						      request_fn_proc *, spinlock_t *);
986 extern void blk_cleanup_queue(struct request_queue *);
987 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
988 extern void blk_queue_bounce_limit(struct request_queue *, u64);
989 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
990 extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
991 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
992 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
993 extern void blk_queue_max_discard_sectors(struct request_queue *q,
994 		unsigned int max_discard_sectors);
995 extern void blk_queue_max_write_same_sectors(struct request_queue *q,
996 		unsigned int max_write_same_sectors);
997 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
998 extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
999 extern void blk_queue_alignment_offset(struct request_queue *q,
1000 				       unsigned int alignment);
1001 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
1002 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
1003 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
1004 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
1005 extern void blk_set_default_limits(struct queue_limits *lim);
1006 extern void blk_set_stacking_limits(struct queue_limits *lim);
1007 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
1008 			    sector_t offset);
1009 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
1010 			    sector_t offset);
1011 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
1012 			      sector_t offset);
1013 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
1014 extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
1015 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
1016 extern int blk_queue_dma_drain(struct request_queue *q,
1017 			       dma_drain_needed_fn *dma_drain_needed,
1018 			       void *buf, unsigned int size);
1019 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn);
1020 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long);
1021 extern void blk_queue_virt_boundary(struct request_queue *, unsigned long);
1022 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn);
1023 extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn);
1024 extern void blk_queue_dma_alignment(struct request_queue *, int);
1025 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
1026 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
1027 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
1028 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
1029 extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
1030 extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua);
1031 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
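/*
 * Illustrative sketch: a driver's probe path typically applies its hardware
 * limits with the setters declared above.  example_setup_queue_limits() and
 * the numeric limits are hypothetical.
 */
static inline void example_setup_queue_limits(struct request_queue *q)
{
	blk_queue_max_hw_sectors(q, 1024);	/* 512 KiB per request */
	blk_queue_max_segments(q, 128);
	blk_queue_max_segment_size(q, 65536);
	blk_queue_logical_block_size(q, 512);
	blk_queue_io_min(q, 4096);
}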
1032 
1033 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
1034 extern void blk_dump_rq_flags(struct request *, char *);
1035 extern long nr_blockdev_pages(void);
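/*
 * Illustrative sketch: mapping a request onto a driver-owned scatterlist
 * with blk_rq_map_sg() before programming DMA.  example_map_request() and
 * its max_segs parameter are hypothetical.
 */
static inline int example_map_request(struct request_queue *q,
				      struct request *rq,
				      struct scatterlist *sgl, int max_segs)
{
	sg_init_table(sgl, max_segs);

	/* returns the number of scatterlist entries actually used */
	return blk_rq_map_sg(q, rq, sgl);
}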
1036 
1037 bool __must_check blk_get_queue(struct request_queue *);
1038 struct request_queue *blk_alloc_queue(gfp_t);
1039 struct request_queue *blk_alloc_queue_node(gfp_t, int);
1040 extern void blk_put_queue(struct request_queue *);
1041 extern void blk_set_queue_dying(struct request_queue *);
1042 
1043 /*
1044  * block layer runtime pm functions
1045  */
1046 #ifdef CONFIG_PM
1047 extern void blk_pm_runtime_init(struct request_queue *q, struct device *dev);
1048 extern int blk_pre_runtime_suspend(struct request_queue *q);
1049 extern void blk_post_runtime_suspend(struct request_queue *q, int err);
1050 extern void blk_pre_runtime_resume(struct request_queue *q);
1051 extern void blk_post_runtime_resume(struct request_queue *q, int err);
1052 extern void blk_set_runtime_active(struct request_queue *q);
1053 #else
1054 static inline void blk_pm_runtime_init(struct request_queue *q,
1055 	struct device *dev) {}
1056 static inline int blk_pre_runtime_suspend(struct request_queue *q)
1057 {
1058 	return -ENOSYS;
1059 }
1060 static inline void blk_post_runtime_suspend(struct request_queue *q, int err) {}
1061 static inline void blk_pre_runtime_resume(struct request_queue *q) {}
1062 static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
1063 static inline void blk_set_runtime_active(struct request_queue *q) {}
1064 #endif
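/*
 * Illustrative sketch: how a driver's runtime-suspend callback brackets its
 * own power-down work with the block layer hooks above.
 * example_runtime_suspend() is hypothetical and the device power-down
 * itself is elided.
 */
static inline int example_runtime_suspend(struct request_queue *q)
{
	int ret = blk_pre_runtime_suspend(q);

	if (ret)
		return ret;

	/* power the device down here, then report the outcome back */
	blk_post_runtime_suspend(q, 0);

	return 0;
}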
1065 
1066 /*
1067  * blk_plug permits building a queue of related requests by holding the I/O
1068  * fragments for a short period. This allows merging of sequential requests
1069  * into a single larger request. As the requests are moved from a per-task
1070  * list to the device's request_queue in a batch, scalability improves because
1071  * contention on the request_queue lock is reduced.
1072  *
1073  * It is ok not to disable preemption when adding the request to the plug list
1074  * or when attempting a merge, because blk_schedule_flush_plug() will only flush
1075  * the plug list when the task sleeps by itself. For details, please see
1076  * schedule() where blk_schedule_flush_plug() is called.
1077  */
1078 struct blk_plug {
1079 	struct list_head list; /* requests */
1080 	struct list_head mq_list; /* blk-mq requests */
1081 	struct list_head cb_list; /* md requires an unplug callback */
1082 };
1083 #define BLK_MAX_REQUEST_COUNT 16
1084 
1085 struct blk_plug_cb;
1086 typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);
1087 struct blk_plug_cb {
1088 	struct list_head list;
1089 	blk_plug_cb_fn callback;
1090 	void *data;
1091 };
1092 extern struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
1093 					     void *data, int size);
1094 extern void blk_start_plug(struct blk_plug *);
1095 extern void blk_finish_plug(struct blk_plug *);
1096 extern void blk_flush_plug_list(struct blk_plug *, bool);
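/*
 * Illustrative sketch: batching a list of bios under one plug so the block
 * layer can merge them before they reach the driver.
 * example_submit_bio_list() and its singly linked bi_next list are
 * hypothetical.
 */
static inline void example_submit_bio_list(struct bio *bios)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bios) != NULL) {
		bios = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
	}
	blk_finish_plug(&plug);
}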
1097 
1098 static inline void blk_flush_plug(struct task_struct *tsk)
1099 {
1100 	struct blk_plug *plug = tsk->plug;
1101 
1102 	if (plug)
1103 		blk_flush_plug_list(plug, false);
1104 }
1105 
1106 static inline void blk_schedule_flush_plug(struct task_struct *tsk)
1107 {
1108 	struct blk_plug *plug = tsk->plug;
1109 
1110 	if (plug)
1111 		blk_flush_plug_list(plug, true);
1112 }
1113 
1114 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1115 {
1116 	struct blk_plug *plug = tsk->plug;
1117 
1118 	return plug &&
1119 		(!list_empty(&plug->list) ||
1120 		 !list_empty(&plug->mq_list) ||
1121 		 !list_empty(&plug->cb_list));
1122 }
1123 
1124 /*
1125  * tag stuff
1126  */
1127 extern int blk_queue_start_tag(struct request_queue *, struct request *);
1128 extern struct request *blk_queue_find_tag(struct request_queue *, int);
1129 extern void blk_queue_end_tag(struct request_queue *, struct request *);
1130 extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
1131 extern void blk_queue_free_tags(struct request_queue *);
1132 extern int blk_queue_resize_tags(struct request_queue *, int);
1133 extern void blk_queue_invalidate_tags(struct request_queue *);
1134 extern struct blk_queue_tag *blk_init_tags(int, int);
1135 extern void blk_free_tags(struct blk_queue_tag *);
1136 
1137 static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
1138 						int tag)
1139 {
1140 	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
1141 		return NULL;
1142 	return bqt->tag_index[tag];
1143 }
1144 
1145 
1146 #define BLKDEV_DISCARD_SECURE	(1 << 0)	/* issue a secure erase */
1147 #define BLKDEV_DISCARD_ZERO	(1 << 1)	/* must reliably zero data */
1148 
1149 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
1150 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1151 		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
1152 extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
1153 		sector_t nr_sects, gfp_t gfp_mask, int flags,
1154 		struct bio **biop);
1155 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
1156 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);
1157 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
1158 		sector_t nr_sects, gfp_t gfp_mask, bool discard);
1159 static inline int sb_issue_discard(struct super_block *sb, sector_t block,
1160 		sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
1161 {
1162 	return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
1163 				    nr_blocks << (sb->s_blocksize_bits - 9),
1164 				    gfp_mask, flags);
1165 }
1166 static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
1167 		sector_t nr_blocks, gfp_t gfp_mask)
1168 {
1169 	return blkdev_issue_zeroout(sb->s_bdev,
1170 				    block << (sb->s_blocksize_bits - 9),
1171 				    nr_blocks << (sb->s_blocksize_bits - 9),
1172 				    gfp_mask, true);
1173 }
1174 
1175 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
1176 
1177 enum blk_default_limits {
1178 	BLK_MAX_SEGMENTS	= 128,
1179 	BLK_SAFE_MAX_SECTORS	= 255,
1180 	BLK_DEF_MAX_SECTORS	= 2560,
1181 	BLK_MAX_SEGMENT_SIZE	= 65536,
1182 	BLK_SEG_BOUNDARY_MASK	= 0xFFFFFFFFUL,
1183 };
1184 
1185 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
1186 
1187 static inline unsigned long queue_bounce_pfn(struct request_queue *q)
1188 {
1189 	return q->limits.bounce_pfn;
1190 }
1191 
1192 static inline unsigned long queue_segment_boundary(struct request_queue *q)
1193 {
1194 	return q->limits.seg_boundary_mask;
1195 }
1196 
1197 static inline unsigned long queue_virt_boundary(struct request_queue *q)
1198 {
1199 	return q->limits.virt_boundary_mask;
1200 }
1201 
1202 static inline unsigned int queue_max_sectors(struct request_queue *q)
1203 {
1204 	return q->limits.max_sectors;
1205 }
1206 
1207 static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
1208 {
1209 	return q->limits.max_hw_sectors;
1210 }
1211 
1212 static inline unsigned short queue_max_segments(struct request_queue *q)
1213 {
1214 	return q->limits.max_segments;
1215 }
1216 
1217 static inline unsigned int queue_max_segment_size(struct request_queue *q)
1218 {
1219 	return q->limits.max_segment_size;
1220 }
1221 
1222 static inline unsigned short queue_logical_block_size(struct request_queue *q)
1223 {
1224 	int retval = 512;
1225 
1226 	if (q && q->limits.logical_block_size)
1227 		retval = q->limits.logical_block_size;
1228 
1229 	return retval;
1230 }
1231 
1232 static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
1233 {
1234 	return queue_logical_block_size(bdev_get_queue(bdev));
1235 }
1236 
1237 static inline unsigned int queue_physical_block_size(struct request_queue *q)
1238 {
1239 	return q->limits.physical_block_size;
1240 }
1241 
1242 static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
1243 {
1244 	return queue_physical_block_size(bdev_get_queue(bdev));
1245 }
1246 
1247 static inline unsigned int queue_io_min(struct request_queue *q)
1248 {
1249 	return q->limits.io_min;
1250 }
1251 
1252 static inline int bdev_io_min(struct block_device *bdev)
1253 {
1254 	return queue_io_min(bdev_get_queue(bdev));
1255 }
1256 
1257 static inline unsigned int queue_io_opt(struct request_queue *q)
1258 {
1259 	return q->limits.io_opt;
1260 }
1261 
1262 static inline int bdev_io_opt(struct block_device *bdev)
1263 {
1264 	return queue_io_opt(bdev_get_queue(bdev));
1265 }
1266 
1267 static inline int queue_alignment_offset(struct request_queue *q)
1268 {
1269 	if (q->limits.misaligned)
1270 		return -1;
1271 
1272 	return q->limits.alignment_offset;
1273 }
1274 
1275 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
1276 {
1277 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
1278 	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
1279 
1280 	return (granularity + lim->alignment_offset - alignment) % granularity;
1281 }
1282 
1283 static inline int bdev_alignment_offset(struct block_device *bdev)
1284 {
1285 	struct request_queue *q = bdev_get_queue(bdev);
1286 
1287 	if (q->limits.misaligned)
1288 		return -1;
1289 
1290 	if (bdev != bdev->bd_contains)
1291 		return bdev->bd_part->alignment_offset;
1292 
1293 	return q->limits.alignment_offset;
1294 }
1295 
1296 static inline int queue_discard_alignment(struct request_queue *q)
1297 {
1298 	if (q->limits.discard_misaligned)
1299 		return -1;
1300 
1301 	return q->limits.discard_alignment;
1302 }
1303 
1304 static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1305 {
1306 	unsigned int alignment, granularity, offset;
1307 
1308 	if (!lim->max_discard_sectors)
1309 		return 0;
1310 
1311 	/* Why are these in bytes, not sectors? */
1312 	alignment = lim->discard_alignment >> 9;
1313 	granularity = lim->discard_granularity >> 9;
1314 	if (!granularity)
1315 		return 0;
1316 
1317 	/* Offset of the partition start in 'granularity' sectors */
1318 	offset = sector_div(sector, granularity);
1319 
1320 	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
1321 	offset = (granularity + alignment - offset) % granularity;
1322 
1323 	/* Turn it back into bytes, gaah */
1324 	return offset << 9;
1325 }
1326 
1327 static inline int bdev_discard_alignment(struct block_device *bdev)
1328 {
1329 	struct request_queue *q = bdev_get_queue(bdev);
1330 
1331 	if (bdev != bdev->bd_contains)
1332 		return bdev->bd_part->discard_alignment;
1333 
1334 	return q->limits.discard_alignment;
1335 }
1336 
1337 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
1338 {
1339 	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
1340 		return 1;
1341 
1342 	return 0;
1343 }
1344 
1345 static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
1346 {
1347 	return queue_discard_zeroes_data(bdev_get_queue(bdev));
1348 }
1349 
1350 static inline unsigned int bdev_write_same(struct block_device *bdev)
1351 {
1352 	struct request_queue *q = bdev_get_queue(bdev);
1353 
1354 	if (q)
1355 		return q->limits.max_write_same_sectors;
1356 
1357 	return 0;
1358 }
1359 
1360 static inline int queue_dma_alignment(struct request_queue *q)
1361 {
1362 	return q ? q->dma_alignment : 511;
1363 }
1364 
1365 static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
1366 				 unsigned int len)
1367 {
1368 	unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
1369 	return !(addr & alignment) && !(len & alignment);
1370 }
1371 
1372 /* assumes size > 256 */
1373 static inline unsigned int blksize_bits(unsigned int size)
1374 {
1375 	unsigned int bits = 8;
1376 	do {
1377 		bits++;
1378 		size >>= 1;
1379 	} while (size > 256);
1380 	return bits;
1381 }
1382 
1383 static inline unsigned int block_size(struct block_device *bdev)
1384 {
1385 	return bdev->bd_block_size;
1386 }
1387 
1388 static inline bool queue_flush_queueable(struct request_queue *q)
1389 {
1390 	return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags);
1391 }
1392 
1393 typedef struct {struct page *v;} Sector;
1394 
1395 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
1396 
1397 static inline void put_dev_sector(Sector p)
1398 {
1399 	put_page(p.v);
1400 }
1401 
1402 static inline bool __bvec_gap_to_prev(struct request_queue *q,
1403 				struct bio_vec *bprv, unsigned int offset)
1404 {
1405 	return offset ||
1406 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
1407 }
1408 
1409 /*
1410  * Check if adding a bio_vec after bprv with offset would create a gap in
1411  * the SG list. Most drivers don't care about this, but some do.
1412  */
1413 static inline bool bvec_gap_to_prev(struct request_queue *q,
1414 				struct bio_vec *bprv, unsigned int offset)
1415 {
1416 	if (!queue_virt_boundary(q))
1417 		return false;
1418 	return __bvec_gap_to_prev(q, bprv, offset);
1419 }
1420 
1421 static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
1422 			 struct bio *next)
1423 {
1424 	if (bio_has_data(prev) && queue_virt_boundary(q)) {
1425 		struct bio_vec pb, nb;
1426 
1427 		bio_get_last_bvec(prev, &pb);
1428 		bio_get_first_bvec(next, &nb);
1429 
1430 		return __bvec_gap_to_prev(q, &pb, nb.bv_offset);
1431 	}
1432 
1433 	return false;
1434 }
1435 
1436 static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
1437 {
1438 	return bio_will_gap(req->q, req->biotail, bio);
1439 }
1440 
1441 static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
1442 {
1443 	return bio_will_gap(req->q, bio, req->bio);
1444 }
1445 
1446 int kblockd_schedule_work(struct work_struct *work);
1447 int kblockd_schedule_work_on(int cpu, struct work_struct *work);
1448 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
1449 int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
1450 
1451 #ifdef CONFIG_BLK_CGROUP
1452 /*
1453  * This should not be using sched_clock(). A real patch is in progress
1454  * to fix this up; until that is in place we need to disable preemption
1455  * around sched_clock() in this function and set_io_start_time_ns().
1456  */
1457 static inline void set_start_time_ns(struct request *req)
1458 {
1459 	preempt_disable();
1460 	req->start_time_ns = sched_clock();
1461 	preempt_enable();
1462 }
1463 
1464 static inline void set_io_start_time_ns(struct request *req)
1465 {
1466 	preempt_disable();
1467 	req->io_start_time_ns = sched_clock();
1468 	preempt_enable();
1469 }
1470 
1471 static inline uint64_t rq_start_time_ns(struct request *req)
1472 {
1473         return req->start_time_ns;
1474 }
1475 
1476 static inline uint64_t rq_io_start_time_ns(struct request *req)
1477 {
1478         return req->io_start_time_ns;
1479 }
1480 #else
1481 static inline void set_start_time_ns(struct request *req) {}
1482 static inline void set_io_start_time_ns(struct request *req) {}
1483 static inline uint64_t rq_start_time_ns(struct request *req)
1484 {
1485 	return 0;
1486 }
1487 static inline uint64_t rq_io_start_time_ns(struct request *req)
1488 {
1489 	return 0;
1490 }
1491 #endif
1492 
1493 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1494 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
1495 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
1496 	MODULE_ALIAS("block-major-" __stringify(major) "-*")
1497 
1498 #if defined(CONFIG_BLK_DEV_INTEGRITY)
1499 
1500 enum blk_integrity_flags {
1501 	BLK_INTEGRITY_VERIFY		= 1 << 0,
1502 	BLK_INTEGRITY_GENERATE		= 1 << 1,
1503 	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
1504 	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
1505 };
1506 
1507 struct blk_integrity_iter {
1508 	void			*prot_buf;
1509 	void			*data_buf;
1510 	sector_t		seed;
1511 	unsigned int		data_size;
1512 	unsigned short		interval;
1513 	const char		*disk_name;
1514 };
1515 
1516 typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
1517 
1518 struct blk_integrity_profile {
1519 	integrity_processing_fn		*generate_fn;
1520 	integrity_processing_fn		*verify_fn;
1521 	const char			*name;
1522 };
1523 
1524 extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
1525 extern void blk_integrity_unregister(struct gendisk *);
1526 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
1527 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
1528 				   struct scatterlist *);
1529 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
1530 extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
1531 				   struct request *);
1532 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
1533 				    struct bio *);
1534 
1535 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1536 {
1537 	struct blk_integrity *bi = &disk->queue->integrity;
1538 
1539 	if (!bi->profile)
1540 		return NULL;
1541 
1542 	return bi;
1543 }
1544 
1545 static inline
1546 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
1547 {
1548 	return blk_get_integrity(bdev->bd_disk);
1549 }
1550 
1551 static inline bool blk_integrity_rq(struct request *rq)
1552 {
1553 	return rq->cmd_flags & REQ_INTEGRITY;
1554 }
1555 
1556 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1557 						    unsigned int segs)
1558 {
1559 	q->limits.max_integrity_segments = segs;
1560 }
1561 
1562 static inline unsigned short
1563 queue_max_integrity_segments(struct request_queue *q)
1564 {
1565 	return q->limits.max_integrity_segments;
1566 }
1567 
1568 static inline bool integrity_req_gap_back_merge(struct request *req,
1569 						struct bio *next)
1570 {
1571 	struct bio_integrity_payload *bip = bio_integrity(req->bio);
1572 	struct bio_integrity_payload *bip_next = bio_integrity(next);
1573 
1574 	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1575 				bip_next->bip_vec[0].bv_offset);
1576 }
1577 
1578 static inline bool integrity_req_gap_front_merge(struct request *req,
1579 						 struct bio *bio)
1580 {
1581 	struct bio_integrity_payload *bip = bio_integrity(bio);
1582 	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
1583 
1584 	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
1585 				bip_next->bip_vec[0].bv_offset);
1586 }
1587 
1588 #else /* CONFIG_BLK_DEV_INTEGRITY */
1589 
1590 struct bio;
1591 struct block_device;
1592 struct gendisk;
1593 struct blk_integrity;
1594 
1595 static inline int blk_integrity_rq(struct request *rq)
1596 {
1597 	return 0;
1598 }
1599 static inline int blk_rq_count_integrity_sg(struct request_queue *q,
1600 					    struct bio *b)
1601 {
1602 	return 0;
1603 }
1604 static inline int blk_rq_map_integrity_sg(struct request_queue *q,
1605 					  struct bio *b,
1606 					  struct scatterlist *s)
1607 {
1608 	return 0;
1609 }
1610 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
1611 {
1612 	return NULL;
1613 }
1614 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
1615 {
1616 	return NULL;
1617 }
1618 static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
1619 {
1620 	return 0;
1621 }
1622 static inline void blk_integrity_register(struct gendisk *d,
1623 					 struct blk_integrity *b)
1624 {
1625 }
1626 static inline void blk_integrity_unregister(struct gendisk *d)
1627 {
1628 }
1629 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
1630 						    unsigned int segs)
1631 {
1632 }
1633 static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
1634 {
1635 	return 0;
1636 }
1637 static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1638 					  struct request *r1,
1639 					  struct request *r2)
1640 {
1641 	return true;
1642 }
1643 static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1644 					   struct request *r,
1645 					   struct bio *b)
1646 {
1647 	return true;
1648 }
1649 
1650 static inline bool integrity_req_gap_back_merge(struct request *req,
1651 						struct bio *next)
1652 {
1653 	return false;
1654 }
1655 static inline bool integrity_req_gap_front_merge(struct request *req,
1656 						 struct bio *bio)
1657 {
1658 	return false;
1659 }
1660 
1661 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1662 
1663 /**
1664  * struct blk_dax_ctl - control and output parameters for ->direct_access
1665  * @sector: (input) offset relative to a block_device
1666  * @addr: (output) kernel virtual address for @sector populated by driver
1667  * @pfn: (output) page frame number for @addr populated by driver
1668  * @size: (input) number of bytes requested
1669  */
1670 struct blk_dax_ctl {
1671 	sector_t sector;
1672 	void *addr;
1673 	long size;
1674 	pfn_t pfn;
1675 };
1676 
1677 struct block_device_operations {
1678 	int (*open) (struct block_device *, fmode_t);
1679 	void (*release) (struct gendisk *, fmode_t);
1680 	int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
1681 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1682 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
1683 	long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
1684 			long);
1685 	unsigned int (*check_events) (struct gendisk *disk,
1686 				      unsigned int clearing);
1687 	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
1688 	int (*media_changed) (struct gendisk *);
1689 	void (*unlock_native_capacity) (struct gendisk *);
1690 	int (*revalidate_disk) (struct gendisk *);
1691 	int (*getgeo)(struct block_device *, struct hd_geometry *);
1692 	/* called with swap_lock and sometimes the page table lock held */
1693 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
1694 	struct module *owner;
1695 	const struct pr_ops *pr_ops;
1696 };
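
/*
 * Illustrative usage sketch, not part of the original header: a driver
 * normally defines a static ops table in its .c file and points its
 * gendisk at it (the names below are hypothetical):
 *
 *	static const struct block_device_operations example_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= example_open,
 *		.release	= example_release,
 *		.getgeo		= example_getgeo,
 *	};
 *	...
 *	disk->fops = &example_fops;
 */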
1697 
1698 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
1699 				 unsigned long);
1700 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
1701 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
1702 						struct writeback_control *);
1703 extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
1704 extern int bdev_dax_supported(struct super_block *, int);
1705 extern bool bdev_dax_capable(struct block_device *);
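
/*
 * Illustrative sketch, not part of the original header: a DAX-aware caller
 * fills in the input fields of blk_dax_ctl and reads the outputs back once
 * bdev_direct_access() returns. The helper name is hypothetical; the return
 * value is assumed to be the number of bytes usable at dax.addr on success
 * and a negative errno otherwise, and @sector is assumed to be suitably
 * (page) aligned within the device.
 */
static inline long example_dax_map_sector(struct block_device *bdev,
					  sector_t sector, void **kaddr,
					  pfn_t *pfn)
{
	struct blk_dax_ctl dax = {
		.sector	= sector,	/* input: offset within bdev */
		.size	= PAGE_SIZE,	/* input: bytes requested */
	};
	long avail = bdev_direct_access(bdev, &dax);

	if (avail < 0)
		return avail;

	*kaddr = dax.addr;		/* output: kernel virtual address */
	*pfn = dax.pfn;			/* output: matching page frame */
	return avail;
}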
1706 
1707 /*
1708  * X-axis for IO latency histogram support.
1709  */
1710 static const u_int64_t latency_x_axis_us[] = {
1711 	100,
1712 	200,
1713 	300,
1714 	400,
1715 	500,
1716 	600,
1717 	700,
1718 	800,
1719 	900,
1720 	1000,
1721 	1200,
1722 	1400,
1723 	1600,
1724 	1800,
1725 	2000,
1726 	2500,
1727 	3000,
1728 	4000,
1729 	5000,
1730 	6000,
1731 	7000,
1732 	9000,
1733 	10000
1734 };
1735 
1736 #define BLK_IO_LAT_HIST_DISABLE         0
1737 #define BLK_IO_LAT_HIST_ENABLE          1
1738 #define BLK_IO_LAT_HIST_ZERO            2
1739 
1740 struct io_latency_state {
1741 	u_int64_t	latency_y_axis[ARRAY_SIZE(latency_x_axis_us) + 1];
1742 	u_int64_t	latency_elems;
1743 	u_int64_t	latency_sum;
1744 };
1745 
1746 static inline void
1747 blk_update_latency_hist(struct io_latency_state *s, u_int64_t delta_us)
1748 {
1749 	int i;
1750 
1751 	for (i = 0; i < ARRAY_SIZE(latency_x_axis_us); i++)
1752 		if (delta_us < (u_int64_t)latency_x_axis_us[i])
1753 			break;
1754 	s->latency_y_axis[i]++;
1755 	s->latency_elems++;
1756 	s->latency_sum += delta_us;
1757 }
1758 
1759 ssize_t blk_latency_hist_show(char *name, struct io_latency_state *s,
1760 		char *buf, int buf_size);
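
/*
 * Illustrative sketch, not part of the original header: a driver embeds an
 * io_latency_state in its per-device data, feeds it from its completion
 * path, and memsets it to zero when userspace selects BLK_IO_LAT_HIST_ZERO.
 * The helper below is hypothetical; div_u64() may additionally require
 * <linux/math64.h> in the file that uses it.
 */
static inline void example_blk_account_latency(struct io_latency_state *s,
					       u_int64_t start_time_ns)
{
	u_int64_t delta_ns = sched_clock() - start_time_ns;

	/* blk_update_latency_hist() buckets by microseconds */
	blk_update_latency_hist(s, div_u64(delta_ns, 1000));
}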
1761 
1762 #else /* CONFIG_BLOCK */
1763 
1764 struct block_device;
1765 
1766 /*
1767  * stubs for when the block layer is configured out
1768  */
1769 #define buffer_heads_over_limit 0
1770 
1771 static inline long nr_blockdev_pages(void)
1772 {
1773 	return 0;
1774 }
1775 
1776 struct blk_plug {
1777 };
1778 
1779 static inline void blk_start_plug(struct blk_plug *plug)
1780 {
1781 }
1782 
1783 static inline void blk_finish_plug(struct blk_plug *plug)
1784 {
1785 }
1786 
1787 static inline void blk_flush_plug(struct task_struct *task)
1788 {
1789 }
1790 
1791 static inline void blk_schedule_flush_plug(struct task_struct *task)
1792 {
1793 }
1794 
1795 
1796 static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1797 {
1798 	return false;
1799 }
1800 
1801 static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
1802 				     sector_t *error_sector)
1803 {
1804 	return 0;
1805 }
1806 
1807 #endif /* CONFIG_BLOCK */
1808 
1809 #endif
1810