// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blk-integrity.h>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>

#include "blk.h"
#include "blk-rq-qos.h"
#include "blk-wbt.h"

void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);

/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim: the queue_limits structure to reset
 *
 * Prepare queue limits for applying limits from underlying devices using
 * blk_stack_limits().
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	memset(lim, 0, sizeof(*lim));
	lim->logical_block_size = SECTOR_SIZE;
	lim->physical_block_size = SECTOR_SIZE;
	lim->io_min = SECTOR_SIZE;
	lim->discard_granularity = SECTOR_SIZE;
	lim->dma_alignment = SECTOR_SIZE - 1;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
	lim->max_zone_append_sectors = UINT_MAX;
	lim->max_user_discard_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
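
/*
 * Usage sketch (illustrative only, not part of this file): a stacking
 * driver such as MD or DM would typically reset its limits with
 * blk_set_stacking_limits() and then fold in every component device,
 * before applying the result to its own queue (e.g. via queue_limits_set()
 * or at disk allocation time).  "my_dev", "nr_components" and
 * "component_bdev()" below are hypothetical driver helpers.
 *
 *	struct queue_limits lim;
 *	int i;
 *
 *	blk_set_stacking_limits(&lim);
 *	for (i = 0; i < nr_components; i++)
 *		queue_limits_stack_bdev(&lim, component_bdev(my_dev, i),
 *					0, "my_dev");
 */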

void blk_apply_bdi_limits(struct backing_dev_info *bdi,
		struct queue_limits *lim)
{
	/*
	 * For read-ahead of large files to be effective, we need to read ahead
	 * at least twice the optimal I/O size.
	 *
	 * There is no hardware limitation for the read-ahead size and the user
	 * might have increased the read-ahead size through sysfs, so don't ever
	 * decrease it.
	 */
	bdi->ra_pages = max3(bdi->ra_pages,
			lim->io_opt * 2 / PAGE_SIZE,
			VM_READAHEAD_PAGES);
	bdi->io_pages = lim->max_sectors >> PAGE_SECTORS_SHIFT;
}
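
/*
 * Worked example (illustrative numbers): with 4 KiB pages and
 * lim->io_opt = 512 KiB, the second argument of max3() evaluates to
 * 512 KiB * 2 / 4 KiB = 256 pages, so a default bdi->ra_pages of
 * VM_READAHEAD_PAGES (128 KiB worth of pages) is raised to 1 MiB of
 * read-ahead, while a larger user-configured value is left untouched.
 */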

static int blk_validate_zoned_limits(struct queue_limits *lim)
{
	if (!(lim->features & BLK_FEAT_ZONED)) {
		if (WARN_ON_ONCE(lim->max_open_zones) ||
		    WARN_ON_ONCE(lim->max_active_zones) ||
		    WARN_ON_ONCE(lim->zone_write_granularity) ||
		    WARN_ON_ONCE(lim->max_zone_append_sectors))
			return -EINVAL;
		return 0;
	}

	if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)))
		return -EINVAL;

	/*
	 * Given that active zones include open zones, the maximum number of
	 * open zones cannot be larger than the maximum number of active zones.
	 */
	if (lim->max_active_zones &&
	    lim->max_open_zones > lim->max_active_zones)
		return -EINVAL;

	if (lim->zone_write_granularity < lim->logical_block_size)
		lim->zone_write_granularity = lim->logical_block_size;

	if (lim->max_zone_append_sectors) {
		/*
		 * The Zone Append size is limited by the maximum I/O size
		 * and the zone size given that it can't span zones.
		 */
		lim->max_zone_append_sectors =
			min3(lim->max_hw_sectors,
			     lim->max_zone_append_sectors,
			     lim->chunk_sectors);
	}

	return 0;
}

static int blk_validate_integrity_limits(struct queue_limits *lim)
{
	struct blk_integrity *bi = &lim->integrity;

	if (!bi->tuple_size) {
		if (bi->csum_type != BLK_INTEGRITY_CSUM_NONE ||
		    bi->tag_size || ((bi->flags & BLK_INTEGRITY_REF_TAG))) {
			pr_warn("invalid PI settings.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (lim->features & BLK_FEAT_BOUNCE_HIGH) {
		pr_warn("no bounce buffer support for integrity metadata\n");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY)) {
		pr_warn("integrity support disabled.\n");
		return -EINVAL;
	}

	if (bi->csum_type == BLK_INTEGRITY_CSUM_NONE &&
	    (bi->flags & BLK_INTEGRITY_REF_TAG)) {
		pr_warn("ref tag not supported without checksum.\n");
		return -EINVAL;
	}

	if (!bi->interval_exp)
		bi->interval_exp = ilog2(lim->logical_block_size);

	return 0;
}

/*
 * Returns max guaranteed bytes which we can fit in a bio.
 *
 * We require that an atomic_write is submitted as an ITER_UBUF iov_iter (so a
 * single vector), so we assume that at least PAGE_SIZE fits in each segment,
 * apart from the first and last segments.
 */
static unsigned int blk_queue_max_guaranteed_bio(struct queue_limits *lim)
{
	unsigned int max_segments = min(BIO_MAX_VECS, lim->max_segments);
	unsigned int length;

	length = min(max_segments, 2) * lim->logical_block_size;
	if (max_segments > 2)
		length += (max_segments - 2) * PAGE_SIZE;

	return length;
}
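
/*
 * Worked example (illustrative numbers): with max_segments >= BIO_MAX_VECS
 * (256), a 512-byte logical block size and 4 KiB pages, the guaranteed
 * length is 2 * 512 + 254 * 4096 = 1041408 bytes; only the first and last
 * vectors are assumed to hold as little as one logical block.
 */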

static void blk_atomic_writes_update_limits(struct queue_limits *lim)
{
	unsigned int unit_limit = min(lim->max_hw_sectors << SECTOR_SHIFT,
				      blk_queue_max_guaranteed_bio(lim));

	unit_limit = rounddown_pow_of_two(unit_limit);

	lim->atomic_write_max_sectors =
		min(lim->atomic_write_hw_max >> SECTOR_SHIFT,
		    lim->max_hw_sectors);
	lim->atomic_write_unit_min =
		min(lim->atomic_write_hw_unit_min, unit_limit);
	lim->atomic_write_unit_max =
		min(lim->atomic_write_hw_unit_max, unit_limit);
	lim->atomic_write_boundary_sectors =
		lim->atomic_write_hw_boundary >> SECTOR_SHIFT;
}

static void blk_validate_atomic_write_limits(struct queue_limits *lim)
{
	unsigned int boundary_sectors;

	if (!lim->atomic_write_hw_max)
		goto unsupported;

	boundary_sectors = lim->atomic_write_hw_boundary >> SECTOR_SHIFT;

	if (boundary_sectors) {
		/*
		 * A feature of boundary support is that it disallows bios to
		 * be merged which would result in a merged request which
		 * crosses either a chunk sector or atomic write HW boundary,
		 * even though chunk sectors may be just set for performance.
		 * For simplicity, disallow atomic writes for a chunk sector
		 * which is non-zero and smaller than atomic write HW boundary.
		 * Furthermore, chunk sectors must be a multiple of atomic
		 * write HW boundary. Otherwise boundary support becomes
		 * complicated.
		 * Devices which do not conform to these rules can be dealt
		 * with if and when they show up.
		 */
		if (WARN_ON_ONCE(lim->chunk_sectors % boundary_sectors))
			goto unsupported;

		/*
		 * The boundary size just needs to be a multiple of unit_max
		 * (and not necessarily a power-of-2), so this following check
		 * could be relaxed in future.
		 * Furthermore, if needed, unit_max could even be reduced so
		 * that it is compliant with a !power-of-2 boundary.
		 */
		if (!is_power_of_2(boundary_sectors))
			goto unsupported;
	}

	blk_atomic_writes_update_limits(lim);
	return;

unsupported:
	lim->atomic_write_max_sectors = 0;
	lim->atomic_write_boundary_sectors = 0;
	lim->atomic_write_unit_min = 0;
	lim->atomic_write_unit_max = 0;
}
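
/*
 * Worked example (illustrative numbers): a device reporting
 * atomic_write_hw_max = 64 KiB, atomic_write_hw_unit_min = 4 KiB,
 * atomic_write_hw_unit_max = 64 KiB, no HW boundary and
 * max_hw_sectors = 2048 gives unit_limit = min(1 MiB, guaranteed bio bytes)
 * rounded down to a power of two.  Assuming the guaranteed bio size is at
 * least 64 KiB, atomic_write_max_sectors becomes min(128, 2048) = 128 and
 * the unit_min/unit_max values stay at 4 KiB and 64 KiB respectively.
 */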

/*
 * Check that the limits in lim are valid, initialize defaults for unset
 * values, and cap values based on others where needed.
 */
static int blk_validate_limits(struct queue_limits *lim)
{
	unsigned int max_hw_sectors;
	unsigned int logical_block_sectors;
	unsigned long seg_size;
	int err;

	/*
	 * Unless otherwise specified, default to 512 byte logical blocks and a
	 * physical block size equal to the logical block size.
	 */
	if (!lim->logical_block_size)
		lim->logical_block_size = SECTOR_SIZE;
	else if (blk_validate_block_size(lim->logical_block_size)) {
		pr_warn("Invalid logical block size (%d)\n", lim->logical_block_size);
		return -EINVAL;
	}
	if (lim->physical_block_size < lim->logical_block_size)
		lim->physical_block_size = lim->logical_block_size;

	/*
	 * The minimum I/O size defaults to the physical block size unless
	 * explicitly overridden.
	 */
	if (lim->io_min < lim->physical_block_size)
		lim->io_min = lim->physical_block_size;

	/*
	 * The optimal I/O size may not be aligned to physical block size
	 * (because it may be limited by dma engines which have no clue about
	 * block size of the disks attached to them), so we round it down here.
	 */
	lim->io_opt = round_down(lim->io_opt, lim->physical_block_size);

	/*
	 * max_hw_sectors has a somewhat weird default for historical reasons,
	 * but drivers really should set their own instead of relying on this
	 * value.
	 *
	 * The block layer relies on the fact that every driver can
	 * handle at least a page worth of data per I/O, and needs the value
	 * aligned to the logical block size.
	 */
	if (!lim->max_hw_sectors)
		lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	if (WARN_ON_ONCE(lim->max_hw_sectors < PAGE_SECTORS))
		return -EINVAL;
	logical_block_sectors = lim->logical_block_size >> SECTOR_SHIFT;
	if (WARN_ON_ONCE(logical_block_sectors > lim->max_hw_sectors))
		return -EINVAL;
	lim->max_hw_sectors = round_down(lim->max_hw_sectors,
			logical_block_sectors);

	/*
	 * The actual max_sectors value is a complex beast and also takes the
	 * max_dev_sectors value (set by SCSI ULPs) and a user configurable
	 * value into account. The ->max_sectors value is always calculated
	 * from these, so directly setting it won't have any effect.
	 */
	max_hw_sectors = min_not_zero(lim->max_hw_sectors,
			lim->max_dev_sectors);
	if (lim->max_user_sectors) {
		if (lim->max_user_sectors < BLK_MIN_SEGMENT_SIZE / SECTOR_SIZE)
			return -EINVAL;
		lim->max_sectors = min(max_hw_sectors, lim->max_user_sectors);
	} else if (lim->io_opt > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_opt >> SECTOR_SHIFT);
	} else if (lim->io_min > (BLK_DEF_MAX_SECTORS_CAP << SECTOR_SHIFT)) {
		lim->max_sectors =
			min(max_hw_sectors, lim->io_min >> SECTOR_SHIFT);
	} else {
		lim->max_sectors = min(max_hw_sectors, BLK_DEF_MAX_SECTORS_CAP);
	}
	lim->max_sectors = round_down(lim->max_sectors,
			logical_block_sectors);

	/*
	 * Random default for the maximum number of segments. Drivers should
	 * not rely on this and should set their own.
	 */
	if (!lim->max_segments)
		lim->max_segments = BLK_MAX_SEGMENTS;

	lim->max_discard_sectors =
		min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);

	/*
	 * When discard is not supported, discard_granularity should be reported
	 * as 0 to userspace.
	 */
	if (lim->max_discard_sectors)
		lim->discard_granularity =
			max(lim->discard_granularity, lim->physical_block_size);
	else
		lim->discard_granularity = 0;

	if (!lim->max_discard_segments)
		lim->max_discard_segments = 1;

	/*
	 * By default there is no limit on the segment boundary alignment,
	 * but if there is one it can't be smaller than the page size as
	 * that would break all the normal I/O patterns.
	 */
	if (!lim->seg_boundary_mask)
		lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	if (WARN_ON_ONCE(lim->seg_boundary_mask < BLK_MIN_SEGMENT_SIZE - 1))
		return -EINVAL;

	/*
	 * Stacking devices may have both a virtual boundary and a max segment
	 * size limit, so allow this setting now, and long-term the two
	 * might need to move out of stacking limits since we have immutable
	 * bvecs and lower layer bio splitting is supposed to handle the two
	 * correctly.
	 */
	if (lim->virt_boundary_mask) {
		if (!lim->max_segment_size)
			lim->max_segment_size = UINT_MAX;
	} else {
		/*
		 * The maximum segment size has an odd historic 64k default that
		 * drivers probably should override. Just like the I/O size we
		 * require drivers to at least handle a full page per segment.
		 */
		if (!lim->max_segment_size)
			lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
		if (WARN_ON_ONCE(lim->max_segment_size < BLK_MIN_SEGMENT_SIZE))
			return -EINVAL;
	}

	/* setup min segment size for building new segment in fast path */
	if (lim->seg_boundary_mask > lim->max_segment_size - 1)
		seg_size = lim->max_segment_size;
	else
		seg_size = lim->seg_boundary_mask + 1;
	lim->min_segment_size = min_t(unsigned int, seg_size, PAGE_SIZE);

	/*
	 * We require drivers to at least do logical block aligned I/O, but
	 * historically we could not check for that due to the separate calls
	 * to set the limits. Once the transition is finished the check
	 * below should be narrowed down to check the logical block size.
	 */
	if (!lim->dma_alignment)
		lim->dma_alignment = SECTOR_SIZE - 1;
	if (WARN_ON_ONCE(lim->dma_alignment > PAGE_SIZE))
		return -EINVAL;

	if (lim->alignment_offset) {
		lim->alignment_offset &= (lim->physical_block_size - 1);
		lim->flags &= ~BLK_FLAG_MISALIGNED;
	}

	if (!(lim->features & BLK_FEAT_WRITE_CACHE))
		lim->features &= ~BLK_FEAT_FUA;

	blk_validate_atomic_write_limits(lim);

	err = blk_validate_integrity_limits(lim);
	if (err)
		return err;
	return blk_validate_zoned_limits(lim);
}

/*
 * Set the default limits for a newly allocated queue. @lim contains the
 * initial limits set by the driver, which could be empty, in which case
 * all fields are cleared to zero.
 */
int blk_set_default_limits(struct queue_limits *lim)
{
	/*
	 * Most defaults are set by capping the bounds in blk_validate_limits,
	 * but max_user_discard_sectors is special and needs an explicit
	 * initialization to the max value here.
	 */
	lim->max_user_discard_sectors = UINT_MAX;
	return blk_validate_limits(lim);
}
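
/*
 * Usage sketch (illustrative only): a driver usually fills in just the
 * limits it cares about and lets blk_validate_limits() default the rest,
 * e.g. when passing the limits at allocation time (for instance to
 * blk_mq_alloc_disk(); the exact allocation helper depends on the driver).
 *
 *	struct queue_limits lim = {
 *		.logical_block_size	= 4096,
 *		.max_hw_sectors		= 2048,
 *	};
 *
 * Everything left at zero (io_min, max_segments, dma_alignment, ...) is
 * given a validated default or capped against the fields above.
 */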

/**
 * queue_limits_commit_update - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated by the caller to @q. Drops the limits lock taken by
 * queue_limits_start_update() before returning.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update(struct request_queue *q,
		struct queue_limits *lim)
{
	int error;

	error = blk_validate_limits(lim);
	if (error)
		goto out_unlock;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (q->crypto_profile && lim->integrity.tag_size) {
		pr_warn("blk-integrity: Integrity and hardware inline encryption are not supported together.\n");
		error = -EINVAL;
		goto out_unlock;
	}
#endif

	q->limits = *lim;
	if (q->disk)
		blk_apply_bdi_limits(q->disk->bdi, lim);
out_unlock:
	mutex_unlock(&q->limits_lock);
	return error;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update);
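
/*
 * Usage sketch (illustrative only): the usual update pattern is to take a
 * snapshot with queue_limits_start_update(), which is declared in
 * <linux/blkdev.h> and returns a copy of q->limits with q->limits_lock
 * held, modify the copy and then commit it:
 *
 *	struct queue_limits lim = queue_limits_start_update(q);
 *	int error;
 *
 *	lim.max_hw_sectors = 2048;
 *	error = queue_limits_commit_update(q, &lim);
 *	if (error)
 *		return error;
 *
 * Callers that also need the queue frozen around the commit can use
 * queue_limits_commit_update_frozen() below instead.
 */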

/**
 * queue_limits_commit_update_frozen - commit an atomic update of queue limits
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were obtained from queue_limits_start_update()
 * and updated with the new values by the caller to @q. Freezes the queue
 * before the update and unfreezes it after.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_commit_update_frozen(struct request_queue *q,
		struct queue_limits *lim)
{
	int ret;

	blk_mq_freeze_queue(q);
	ret = queue_limits_commit_update(q, lim);
	blk_mq_unfreeze_queue(q);

	return ret;
}
EXPORT_SYMBOL_GPL(queue_limits_commit_update_frozen);

/**
 * queue_limits_set - apply queue limits to queue
 * @q: queue to update
 * @lim: limits to apply
 *
 * Apply the limits in @lim that were freshly initialized to @q.
 * To update existing limits use queue_limits_start_update() and
 * queue_limits_commit_update() instead.
 *
 * Returns 0 if successful, else a negative error code.
 */
int queue_limits_set(struct request_queue *q, struct queue_limits *lim)
{
	mutex_lock(&q->limits_lock);
	return queue_limits_commit_update(q, lim);
}
EXPORT_SYMBOL_GPL(queue_limits_set);

static int queue_limit_alignment_offset(const struct queue_limits *lim,
		sector_t sector)
{
	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
	unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
		<< SECTOR_SHIFT;

	return (granularity + lim->alignment_offset - alignment) % granularity;
}
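
/*
 * Worked example (illustrative numbers): with physical_block_size = io_min =
 * 4096, alignment_offset = 0 and a partition starting at sector 63, the
 * start sits 63 % 8 = 7 sectors (3584 bytes) into a 4 KiB granule, so the
 * function returns (4096 + 0 - 3584) % 4096 = 512 bytes of alignment offset
 * for the stacked device.
 */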

static unsigned int queue_limit_discard_alignment(
		const struct queue_limits *lim, sector_t sector)
{
	unsigned int alignment, granularity, offset;

	if (!lim->max_discard_sectors)
		return 0;

	/* Why are these in bytes, not sectors? */
	alignment = lim->discard_alignment >> SECTOR_SHIFT;
	granularity = lim->discard_granularity >> SECTOR_SHIFT;
	if (!granularity)
		return 0;

	/* Offset of the partition start in 'granularity' sectors */
	offset = sector_div(sector, granularity);

	/* And why do we do this modulus *again* in blkdev_issue_discard()? */
	offset = (granularity + alignment - offset) % granularity;

	/* Turn it back into bytes, gaah */
	return offset << SECTOR_SHIFT;
}

static unsigned int blk_round_down_sectors(unsigned int sectors, unsigned int lbs)
{
	sectors = round_down(sectors, lbs >> SECTOR_SHIFT);
	if (sectors < PAGE_SIZE >> SECTOR_SHIFT)
		sectors = PAGE_SIZE >> SECTOR_SHIFT;
	return sectors;
}

/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @b: the underlying queue limits (bottom, component device)
 * @start: first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible. The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device. If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->features |= (b->features & BLK_FEAT_INHERIT_MASK);

	/*
	 * BLK_FEAT_NOWAIT and BLK_FEAT_POLL need to be supported both by the
	 * stacking driver and all underlying devices. The stacking driver sets
	 * the flags before stacking the limits, and this will clear the flags
	 * if any of the underlying devices does not support it.
	 */
	if (!(b->features & BLK_FEAT_NOWAIT))
		t->features &= ~BLK_FEAT_NOWAIT;
	if (!(b->features & BLK_FEAT_POLL))
		t->features &= ~BLK_FEAT_POLL;

	t->flags |= (b->flags & BLK_FLAG_MISALIGNED);

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_user_sectors = min_not_zero(t->max_user_sectors,
			b->max_user_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->max_zone_append_sectors = min(queue_limits_max_zone_append_sectors(t),
					 queue_limits_max_zone_append_sectors(b));

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment. Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->flags |= BLK_FLAG_MISALIGNED;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);
	t->dma_alignment = max(t->dma_alignment, b->dma_alignment);

	/* Set non-power-of-2 compatible chunk_sectors boundary */
	if (b->chunk_sectors)
		t->chunk_sectors = gcd(t->chunk_sectors, b->chunk_sectors);

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* chunk_sectors a multiple of the physical block size? */
	if (t->chunk_sectors % (t->physical_block_size >> SECTOR_SHIFT)) {
		t->chunk_sectors = 0;
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->flags |= BLK_FLAG_MISALIGNED;
		ret = -1;
	}

	t->max_sectors = blk_round_down_sectors(t->max_sectors, t->logical_block_size);
	t->max_hw_sectors = blk_round_down_sectors(t->max_hw_sectors, t->logical_block_size);
	t->max_dev_sectors = blk_round_down_sectors(t->max_dev_sectors, t->logical_block_size);

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}
	t->max_secure_erase_sectors = min_not_zero(t->max_secure_erase_sectors,
						   b->max_secure_erase_sectors);
	t->zone_write_granularity = max(t->zone_write_granularity,
					b->zone_write_granularity);
	if (!(t->features & BLK_FEAT_ZONED)) {
		t->zone_write_granularity = 0;
		t->max_zone_append_sectors = 0;
	}
	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
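
/*
 * Worked example (illustrative numbers): if the top device so far has
 * physical_block_size = 512, io_min = 4096 and alignment_offset = 0
 * (top interval 4096), and a component with physical_block_size = io_min =
 * 4096 starts 512 bytes past a 4 KiB boundary (bottom interval 4608), then
 * 4608 % 4096 != 0, so the stacked limits are flagged BLK_FLAG_MISALIGNED
 * and -1 is returned.
 */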

/**
 * queue_limits_stack_bdev - adjust queue_limits for stacked devices
 * @t: the stacking driver limits (top device)
 * @bdev: the underlying block device (bottom)
 * @offset: offset to beginning of data within component device
 * @pfx: prefix to use for warnings logged
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments. The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices. The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 */
void queue_limits_stack_bdev(struct queue_limits *t, struct block_device *bdev,
		sector_t offset, const char *pfx)
{
	if (blk_stack_limits(t, &bdev_get_queue(bdev)->limits,
			get_start_sect(bdev) + offset))
		pr_notice("%s: Warning: Device %pg is misaligned\n",
			pfx, bdev);
}
EXPORT_SYMBOL_GPL(queue_limits_stack_bdev);

/**
 * queue_limits_stack_integrity - stack integrity profile
 * @t: target queue limits
 * @b: base queue limits
 *
 * Check if the integrity profile in @b can be stacked into the
 * target @t. Stacking is possible if either:
 *
 *   a) @t does not have any integrity information stacked into it yet
 *   b) the integrity profile in @b is identical to the one in @t
 *
 * If @b can be stacked into @t, return %true. Else return %false and clear the
 * integrity information in @t.
 */
bool queue_limits_stack_integrity(struct queue_limits *t,
		struct queue_limits *b)
{
	struct blk_integrity *ti = &t->integrity;
	struct blk_integrity *bi = &b->integrity;

	if (!IS_ENABLED(CONFIG_BLK_DEV_INTEGRITY))
		return true;

	if (!ti->tuple_size) {
		/* inherit the settings from the first underlying device */
		if (!(ti->flags & BLK_INTEGRITY_STACKED)) {
			ti->flags = BLK_INTEGRITY_DEVICE_CAPABLE |
				(bi->flags & BLK_INTEGRITY_REF_TAG);
			ti->csum_type = bi->csum_type;
			ti->tuple_size = bi->tuple_size;
			ti->pi_offset = bi->pi_offset;
			ti->interval_exp = bi->interval_exp;
			ti->tag_size = bi->tag_size;
			goto done;
		}
		if (!bi->tuple_size)
			goto done;
	}

	if (ti->tuple_size != bi->tuple_size)
		goto incompatible;
	if (ti->interval_exp != bi->interval_exp)
		goto incompatible;
	if (ti->tag_size != bi->tag_size)
		goto incompatible;
	if (ti->csum_type != bi->csum_type)
		goto incompatible;
	if ((ti->flags & BLK_INTEGRITY_REF_TAG) !=
	    (bi->flags & BLK_INTEGRITY_REF_TAG))
		goto incompatible;

done:
	ti->flags |= BLK_INTEGRITY_STACKED;
	return true;

incompatible:
	memset(ti, 0, sizeof(*ti));
	return false;
}
EXPORT_SYMBOL_GPL(queue_limits_stack_integrity);
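
/*
 * Usage sketch (illustrative only): a stacking driver would typically call
 * this next to the limit stacking for every component device and drop
 * integrity support if any component disagrees, e.g.:
 *
 *	if (!queue_limits_stack_integrity(&lim,
 *					  &bdev_get_queue(bdev)->limits))
 *		pr_warn("mydrv: disabling integrity, mismatched profiles\n");
 *
 * "mydrv" and the surrounding loop over component bdevs are hypothetical.
 */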

/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q: the request queue for the device
 * @depth: queue depth
 *
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	rq_qos_queue_depth_changed(q);
}
EXPORT_SYMBOL(blk_set_queue_depth);

int bdev_alignment_offset(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (q->limits.flags & BLK_FLAG_MISALIGNED)
		return -1;
	if (bdev_is_partition(bdev))
		return queue_limit_alignment_offset(&q->limits,
				bdev->bd_start_sect);
	return q->limits.alignment_offset;
}
EXPORT_SYMBOL_GPL(bdev_alignment_offset);

unsigned int bdev_discard_alignment(struct block_device *bdev)
{
	struct request_queue *q = bdev_get_queue(bdev);

	if (bdev_is_partition(bdev))
		return queue_limit_discard_alignment(&q->limits,
				bdev->bd_start_sect);
	return q->limits.discard_alignment;
}
EXPORT_SYMBOL_GPL(bdev_discard_alignment);