// SPDX-License-Identifier: GPL-2.0
/*
 * Functions related to generic block layer helpers.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

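/*
 * Allocate a new bio with @nr_pages vecs. If an existing bio is passed
 * in, chain it to the new one and submit it, so a caller can build up
 * a long chain of bios and only wait on the final one.
 */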
static struct bio *next_bio(struct bio *bio, unsigned int nr_pages,
		gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (bio) {
		bio_chain(bio, new);
		submit_bio(bio);
	}

	return new;
}

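/**
 * __blkdev_issue_discard - queue a discard without waiting for completion
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 * @biop:	pointer to anchor bio
 *
 * Description:
 *    Queue up discard bios for the sectors in question, chained through
 *    @biop. The caller is responsible for submitting and waiting on the
 *    returned bio chain.
 */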
int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, int flags,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	unsigned int granularity;
	unsigned int op;
	int alignment;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	if (flags & BLKDEV_DISCARD_SECURE) {
		if (!blk_queue_secure_erase(q))
			return -EOPNOTSUPP;
		op = REQ_OP_SECURE_ERASE;
	} else {
		if (!blk_queue_discard(q))
			return -EOPNOTSUPP;
		op = REQ_OP_DISCARD;
	}

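	/*
	 * Both the start sector and the length must be aligned to the
	 * device's logical block size, expressed in 512-byte units.
	 */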
	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	/* Zero-sector (unknown) and one-sector granularities are the same. */
	granularity = max(q->limits.discard_granularity >> 9, 1U);
	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;

	while (nr_sects) {
		unsigned int req_sects;
		sector_t end_sect, tmp;

		/*
		 * Issue in chunks of the user defined max discard setting,
		 * ensuring that bi_size doesn't overflow.
		 */
		req_sects = min_t(sector_t, nr_sects,
					q->limits.max_discard_sectors);
		if (!req_sects)
			goto fail;
		if (req_sects > UINT_MAX >> 9)
			req_sects = UINT_MAX >> 9;

		/*
		 * If splitting a request, and the next starting sector would be
		 * misaligned, stop the discard at the previous aligned sector.
		 */
		end_sect = sector + req_sects;
		tmp = end_sect;
		if (req_sects < nr_sects &&
		    sector_div(tmp, granularity) != alignment) {
			end_sect = end_sect - alignment;
			sector_div(end_sect, granularity);
			end_sect = end_sect * granularity + alignment;
			req_sects = end_sect - sector;
		}

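		/*
		 * Discard bios carry no data pages; the range is described
		 * entirely by bi_sector and bi_size, so allocate the bio
		 * with zero bio_vecs.
		 */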
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, op, 0);

		bio->bi_iter.bi_size = req_sects << 9;
		nr_sects -= req_sects;
		sector = end_sect;

		/*
		 * We can loop for a long time in here, if someone does
		 * full device discards (like mkfs). Be nice and allow
		 * us to schedule out to avoid softlocking if preempt
		 * is disabled.
		 */
		cond_resched();
	}

	*biop = bio;
	return 0;

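	/*
	 * The split loop bails out here when max_discard_sectors is zero
	 * (e.g. the queue limits changed underneath us): wait for any
	 * bios already queued, then tell the caller the rest of the
	 * range could not be discarded.
	 */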
fail:
	if (bio) {
		submit_bio_wait(bio);
		bio_put(bio);
	}
	*biop = NULL;
	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(__blkdev_issue_discard);

/**
 * blkdev_issue_discard - queue a discard
 * @bdev:	blockdev to issue discard for
 * @sector:	start sector
 * @nr_sects:	number of sectors to discard
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	BLKDEV_DISCARD_* flags to control behaviour
 *
 * Description:
 *    Issue a discard request for the sectors in question.
 */
int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, flags,
			&bio);
	if (!ret && bio) {
		ret = submit_bio_wait(bio);
		if (ret == -EOPNOTSUPP)
			ret = 0;
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_discard);

/**
 * __blkdev_issue_write_same - generate a number of bios with the same page
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data to write
 * @biop:	pointer to anchor bio
 *
 * Description:
 *  Generate and issue a number of bios (REQ_OP_WRITE_SAME) with the same page.
 */
static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct page *page,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	unsigned int max_write_same_sectors;
	struct bio *bio = *biop;
	sector_t bs_mask;

	if (!q)
		return -ENXIO;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	if (!bdev_write_same(bdev))
		return -EOPNOTSUPP;

	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
	max_write_same_sectors = UINT_MAX >> 9;

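	/*
	 * Each bio is built by hand with a single segment pointing at
	 * @page, sized to one logical block; the device then replicates
	 * that block across the bio's whole sector range.
	 */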
	while (nr_sects) {
		bio = next_bio(bio, 1, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_vcnt = 1;
		bio->bi_io_vec->bv_page = page;
		bio->bi_io_vec->bv_offset = 0;
		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE_SAME, 0);

		if (nr_sects > max_write_same_sectors) {
			bio->bi_iter.bi_size = max_write_same_sectors << 9;
			nr_sects -= max_write_same_sectors;
			sector += max_write_same_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * blkdev_issue_write_same - queue a write same operation
 * @bdev:	target blockdev
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @page:	page containing data
 *
 * Description:
 *    Issue a write same request for the sectors in question.
 */
int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
				sector_t nr_sects, gfp_t gfp_mask,
				struct page *page)
{
	struct bio *bio = NULL;
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = __blkdev_issue_write_same(bdev, sector, nr_sects, gfp_mask, page,
			&bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	return ret;
}
EXPORT_SYMBOL(blkdev_issue_write_same);

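/*
 * Queue REQ_OP_WRITE_ZEROES bios over the range, chained through @biop
 * in the same way as __blkdev_issue_discard(). Fails with -EOPNOTSUPP
 * when the device does not advertise write-zeroes support.
 */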
static int __blkdev_issue_write_zeroes(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop, unsigned flags)
{
	struct bio *bio = *biop;
	unsigned int max_write_zeroes_sectors;
	struct request_queue *q = bdev_get_queue(bdev);

	if (!q)
		return -ENXIO;

	/* Ensure that max_write_zeroes_sectors doesn't overflow bi_size */
	max_write_zeroes_sectors = bdev_write_zeroes_sectors(bdev);

	if (max_write_zeroes_sectors == 0)
		return -EOPNOTSUPP;

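	/*
	 * Issue the range in chunks of at most max_write_zeroes_sectors.
	 * Like discard bios, these carry no data pages.
	 */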
	while (nr_sects) {
		bio = next_bio(bio, 0, gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio->bi_opf = REQ_OP_WRITE_ZEROES;
		if (flags & BLKDEV_ZERO_NOUNMAP)
			bio->bi_opf |= REQ_NOUNMAP;

		if (nr_sects > max_write_zeroes_sectors) {
			bio->bi_iter.bi_size = max_write_zeroes_sectors << 9;
			nr_sects -= max_write_zeroes_sectors;
			sector += max_write_zeroes_sectors;
		} else {
			bio->bi_iter.bi_size = nr_sects << 9;
			nr_sects = 0;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/*
 * Convert a number of 512B sectors to a number of pages.
 * The result is limited to a number of pages that can fit into a BIO.
 * Also make sure that the result is always at least 1 (page) for the cases
 * where nr_sects is lower than the number of sectors in a page.
 */
static unsigned int __blkdev_sectors_to_bio_pages(sector_t nr_sects)
{
	sector_t pages = DIV_ROUND_UP_SECTOR_T(nr_sects, PAGE_SIZE / 512);

	return min(pages, (sector_t)BIO_MAX_PAGES);
}

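/*
 * Explicit-write fallback for devices without a zeroing offload: issue
 * plain REQ_OP_WRITE bios that map the shared ZERO_PAGE(0), packing as
 * many zero-page segments into each bio as bio_add_page() accepts.
 */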
static int __blkdev_issue_zero_pages(struct block_device *bdev,
		sector_t sector, sector_t nr_sects, gfp_t gfp_mask,
		struct bio **biop)
{
	struct request_queue *q = bdev_get_queue(bdev);
	struct bio *bio = *biop;
	int bi_size = 0;
	unsigned int sz;

	if (!q)
		return -ENXIO;

	while (nr_sects != 0) {
		bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
			       gfp_mask);
		bio->bi_iter.bi_sector = sector;
		bio_set_dev(bio, bdev);
		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

		while (nr_sects != 0) {
			sz = min((sector_t) PAGE_SIZE, nr_sects << 9);
			bi_size = bio_add_page(bio, ZERO_PAGE(0), sz, 0);
			nr_sects -= bi_size >> 9;
			sector += bi_size >> 9;
			if (bi_size < sz)
				break;
		}
		cond_resched();
	}

	*biop = bio;
	return 0;
}

/**
 * __blkdev_issue_zeroout - generate a number of zero-filled write bios
 * @bdev:	blockdev to issue
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @biop:	pointer to anchor bio
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.
 *
 *  If a device is using logical block provisioning, the underlying space will
 *  not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
 *
 *  If %flags contains BLKDEV_ZERO_NOFALLBACK, the function will return
 *  -EOPNOTSUPP if no explicit hardware offload for zeroing is provided.
 */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
		unsigned flags)
{
	int ret;
	sector_t bs_mask;

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

	ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
			biop, flags);
	if (ret != -EOPNOTSUPP || (flags & BLKDEV_ZERO_NOFALLBACK))
		return ret;

	return __blkdev_issue_zero_pages(bdev, sector, nr_sects, gfp_mask,
					 biop);
}
EXPORT_SYMBOL(__blkdev_issue_zeroout);

/**
 * blkdev_issue_zeroout - zero-fill a block range
 * @bdev:	blockdev to write
 * @sector:	start sector
 * @nr_sects:	number of sectors to write
 * @gfp_mask:	memory allocation flags (for bio_alloc)
 * @flags:	controls detailed behavior
 *
 * Description:
 *  Zero-fill a block range, either using hardware offload or by explicitly
 *  writing zeroes to the device.  See __blkdev_issue_zeroout() for the
 *  valid values for %flags.
 */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
		sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{
	int ret = 0;
	sector_t bs_mask;
	struct bio *bio;
	struct blk_plug plug;
	bool try_write_zeroes = !!bdev_write_zeroes_sectors(bdev);

	bs_mask = (bdev_logical_block_size(bdev) >> 9) - 1;
	if ((sector | nr_sects) & bs_mask)
		return -EINVAL;

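	/*
	 * Prefer the write-zeroes offload when the device claims to
	 * support it; if that fails, retry with explicit zero-page
	 * writes unless BLKDEV_ZERO_NOFALLBACK forbids the fallback.
	 */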
retry:
	bio = NULL;
	blk_start_plug(&plug);
	if (try_write_zeroes) {
		ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects,
						  gfp_mask, &bio, flags);
	} else if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
		ret = __blkdev_issue_zero_pages(bdev, sector, nr_sects,
						gfp_mask, &bio);
	} else {
		/* No zeroing offload support */
		ret = -EOPNOTSUPP;
	}
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	blk_finish_plug(&plug);
	if (ret && try_write_zeroes) {
		if (!(flags & BLKDEV_ZERO_NOFALLBACK)) {
			try_write_zeroes = false;
			goto retry;
		}
		if (!bdev_write_zeroes_sectors(bdev)) {
			/*
			 * Zeroing offload support was indicated, but the
			 * device reported ILLEGAL REQUEST (for some devices
			 * there is no non-destructive way to verify whether
			 * WRITE ZEROES is actually supported).
			 */
			ret = -EOPNOTSUPP;
		}
	}

	return ret;
}
EXPORT_SYMBOL(blkdev_issue_zeroout);