// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010, 2023 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_trans.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_alloc_btree.h"
#include "xfs_alloc.h"
#include "xfs_discard.h"
#include "xfs_error.h"
#include "xfs_extent_busy.h"
#include "xfs_trace.h"
#include "xfs_log.h"
#include "xfs_ag.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"

/*
 * Notes on an efficient, low latency fstrim algorithm
 *
 * We need to walk the filesystem free space and issue discards on the free
 * space extents that meet the search criteria (size and location). We cannot
 * issue discards on extents that might be in use, or are so recently in use
 * they are still marked as busy. To serialise against extent state changes
 * whilst we are gathering extents to trim, we must hold the AGF lock to lock
 * out other allocations and extent free operations that might change extent
 * state.
 *
 * However, we cannot just hold the AGF for the entire AG free space walk whilst
 * we issue discards on each free space that is found. Storage devices can have
 * extremely slow discard implementations (e.g. ceph RBD) and so walking a
 * couple of million free extents and issuing synchronous discards on each
 * extent can take a *long* time. Whilst we are doing this walk, nothing else
 * can access the AGF, and we can stall transactions and hence the log whilst
 * modifications wait for the AGF lock to be released. This can lead to hung
 * tasks triggering the hung task timer and rebooting the system. This is bad.
 *
 * Hence we need to take a leaf from the bulkstat playbook. It takes the AGI
 * lock, gathers a range of inode cluster buffers that are allocated, drops the
 * AGI lock and then reads all the inode cluster buffers and processes them. It
 * loops doing this, using a cursor to keep track of where it is up to in the AG
 * for each iteration to restart the INOBT lookup from.
 *
 * We can't do this exactly with free space - once we drop the AGF lock, the
 * state of the free extent is out of our control and we cannot run a discard
 * safely on it in this situation. Unless, of course, we've marked the free
 * extent as busy and undergoing a discard operation whilst we held the AGF
 * locked.
 *
 * This is exactly how online discard works - free extents are marked busy when
 * they are freed, and once the extent free has been committed to the journal,
 * the busy extent record is marked as "undergoing discard" and the discard is
 * then issued on the free extent. Once the discard completes, the busy extent
 * record is removed and the extent is able to be allocated again.
 *
 * In the context of fstrim, if we find a free extent we need to discard, we
 * don't have to discard it immediately. All we need to do is record that free
 * extent as being busy and under discard, and all the allocation routines will
 * now avoid trying to allocate it. Hence if we mark the extent as busy under
 * the AGF lock, we can safely discard it without holding the AGF lock because
 * nothing will attempt to allocate that free space until the discard completes.
 *
 * This also allows us to issue discards asynchronously like we do with online
 * discard, and so for fast devices fstrim will run much faster as we can have
 * multiple discard operations in flight at once, as well as pipeline the free
 * extent search so that it overlaps with in-flight discard IO.
 */

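/* Workqueue used to run discard completions outside of bio (IRQ) context. */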
struct workqueue_struct *xfs_discard_wq;

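/*
 * Discard completion work: clear the busy extents covered by the completed
 * discards and free the structure that owns the extent list.
 */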
static void
xfs_discard_endio_work(
	struct work_struct	*work)
{
	struct xfs_busy_extents	*extents =
		container_of(work, struct xfs_busy_extents, endio_work);

	xfs_extent_busy_clear(extents->mount, &extents->extent_list, false);
	kfree(extents->owner);
}

/*
 * Queue up the actual completion to a thread to avoid IRQ-safe locking for
 * pagb_lock.
 */
static void
xfs_discard_endio(
	struct bio		*bio)
{
	struct xfs_busy_extents	*extents = bio->bi_private;

	INIT_WORK(&extents->endio_work, xfs_discard_endio_work);
	queue_work(xfs_discard_wq, &extents->endio_work);
	bio_put(bio);
}

/*
 * Walk the discard list and issue discards on all the busy extents in the
 * list. We plug and chain the bios so that we only need a single completion
 * call to clear all the busy extents once the discards are complete.
 */
int
xfs_discard_extents(
	struct xfs_mount	*mp,
	struct xfs_busy_extents	*extents)
{
	struct xfs_extent_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &extents->extent_list, list) {
		trace_xfs_discard_extent(mp, busyp->agno, busyp->bno,
					 busyp->length);

		error = __blkdev_issue_discard(mp->m_ddev_targp->bt_bdev,
				XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_KERNEL, &bio);
		if (error && error != -EOPNOTSUPP) {
			xfs_info(mp,
	 "discard failed for extent [0x%llx,%u], error %d",
				 (unsigned long long)busyp->bno,
				 busyp->length,
				 error);
			break;
		}
	}

	if (bio) {
		bio->bi_private = extents;
		bio->bi_end_io = xfs_discard_endio;
		submit_bio(bio);
	} else {
		xfs_discard_endio_work(&extents->endio_work);
	}
	blk_finish_plug(&plug);

	return error;
}

/*
 * Care must be taken when setting up the trim cursor, as the perags may not
 * have been initialised when the cursor is initialised, e.g. on a clean mount
 * which hasn't read in the AGFs and where the first operation run on the
 * mounted fs is a trim. This can result in perag fields that aren't
 * initialised until xfs_trim_gather_extents() calls xfs_alloc_read_agf() to
 * lock down the AG for the free space search.
 */
struct xfs_trim_cur {
	xfs_agblock_t	start;	/* AG block to restart the search from */
	xfs_extlen_t	count;	/* by-size restart key; 0 means we are done */
	xfs_agblock_t	end;	/* last AG block in the range to trim */
	xfs_extlen_t	minlen;	/* smallest extent worth discarding */
	bool		by_bno;	/* sub-AG trim, walk the by-bno btree */
};

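/*
 * Search the free space btrees under the AGF lock and mark candidate extents
 * as busy and under discard. We gather a limited batch of extents per pass so
 * that the AGF lock is not held for too long; tcur records where to restart.
 */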
static int
xfs_trim_gather_extents(
	struct xfs_perag	*pag,
	struct xfs_trim_cur	*tcur,
	struct xfs_busy_extents	*extents)
{
	struct xfs_mount	*mp = pag->pag_mount;
	struct xfs_trans	*tp;
	struct xfs_btree_cur	*cur;
	struct xfs_buf		*agbp;
	int			error;
	int			i;
	int			batch = 100;

	/*
	 * Force out the log.  This means any transactions that might have freed
	 * space before we take the AGF buffer lock are now on disk, and the
	 * volatile disk cache is flushed.
	 */
	xfs_log_force(mp, XFS_LOG_SYNC);

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	error = xfs_alloc_read_agf(pag, tp, 0, &agbp);
	if (error)
		goto out_trans_cancel;

	/*
	 * The first time through, tcur->count will not have been initialised,
	 * as pag->pagf_longest is not guaranteed to be valid before we read
	 * the AGF buffer above.
	 */
	if (!tcur->count)
		tcur->count = pag->pagf_longest;

	if (tcur->by_bno) {
		/* sub-AG discard request always starts at tcur->start */
		cur = xfs_bnobt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, 0, &i);
		if (!error && !i)
			error = xfs_alloc_lookup_ge(cur, tcur->start, 0, &i);
	} else if (tcur->start == 0) {
		/* first time through a by-len starts with max length */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_ge(cur, 0, tcur->count, &i);
	} else {
		/* nth time through a by-len starts where we left off */
		cur = xfs_cntbt_init_cursor(mp, tp, agbp, pag);
		error = xfs_alloc_lookup_le(cur, tcur->start, tcur->count, &i);
	}
	if (error)
		goto out_del_cursor;
	if (i == 0) {
		/* nothing of that length left in the AG, we are done */
		tcur->count = 0;
		goto out_del_cursor;
	}

	/*
	 * Loop until we are done with all extents that are large
	 * enough to be worth discarding or we hit batch limits.
	 */
	while (i) {
		xfs_agblock_t	fbno;
		xfs_extlen_t	flen;

		error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
		if (error)
			break;
		if (XFS_IS_CORRUPT(mp, i != 1)) {
			xfs_btree_mark_sick(cur);
			error = -EFSCORRUPTED;
			break;
		}

		if (--batch <= 0) {
			/*
			 * Update the cursor to point at this extent so we
			 * restart the next batch from this extent.
			 */
			tcur->start = fbno;
			tcur->count = flen;
			break;
		}

		/*
		 * If the extent is entirely outside of the range we are
		 * supposed to trim, skip it.  Do not bother to trim down
		 * partially overlapping ranges for now.
		 */
		if (fbno + flen < tcur->start) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}
		if (fbno > tcur->end) {
			trace_xfs_discard_exclude(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno) {
				tcur->count = 0;
				break;
			}
			goto next_extent;
		}

		/* Trim the extent returned to the range we want. */
		if (fbno < tcur->start) {
			flen -= tcur->start - fbno;
			fbno = tcur->start;
		}
		if (fbno + flen > tcur->end + 1)
			flen = tcur->end - fbno + 1;

		/* Too small?  Give up. */
		if (flen < tcur->minlen) {
			trace_xfs_discard_toosmall(mp, pag->pag_agno, fbno, flen);
			if (tcur->by_bno)
				goto next_extent;
			tcur->count = 0;
			break;
		}

		/*
		 * If any blocks in the range are still busy, skip the
		 * discard and try again the next time.
		 */
		if (xfs_extent_busy_search(mp, pag, fbno, flen)) {
			trace_xfs_discard_busy(mp, pag->pag_agno, fbno, flen);
			goto next_extent;
		}

		xfs_extent_busy_insert_discard(pag, fbno, flen,
				&extents->extent_list);
next_extent:
		if (tcur->by_bno)
			error = xfs_btree_increment(cur, 0, &i);
		else
			error = xfs_btree_decrement(cur, 0, &i);
		if (error)
			break;

		/*
		 * If there are no more records in the tree, we are done. Set
		 * the cursor block count to 0 to indicate to the caller that
		 * there are no more extents to search.
		 */
		if (i == 0)
			tcur->count = 0;
	}

	/*
	 * If there was an error, release all the gathered busy extents because
	 * we aren't going to issue a discard on them any more.
	 */
	if (error)
		xfs_extent_busy_clear(mp, &extents->extent_list, false);
out_del_cursor:
	xfs_btree_del_cursor(cur, error);
out_trans_cancel:
	xfs_trans_cancel(tp);
	return error;
}

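/* Stop trimming if a fatal signal is pending or the task is being frozen. */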
static bool
xfs_trim_should_stop(void)
{
	return fatal_signal_pending(current) || freezing(current);
}

/*
 * Iterate the free space, gathering extents and discarding them. We need a
 * cursor for the repeated iteration of the gather/discard loop, so use the
 * longest extent we found in the last batch as the key to start the next.
 */
static int
xfs_trim_perag_extents(
	struct xfs_perag	*pag,
	xfs_agblock_t		start,
	xfs_agblock_t		end,
	xfs_extlen_t		minlen)
{
	struct xfs_trim_cur	tcur = {
		.start		= start,
		.end		= end,
		.minlen		= minlen,
	};
	int			error = 0;

	if (start != 0 || end != pag->block_count)
		tcur.by_bno = true;

	do {
		struct xfs_busy_extents	*extents;

		extents = kzalloc(sizeof(*extents), GFP_KERNEL);
		if (!extents) {
			error = -ENOMEM;
			break;
		}

		extents->mount = pag->pag_mount;
		extents->owner = extents;
		INIT_LIST_HEAD(&extents->extent_list);

		error = xfs_trim_gather_extents(pag, &tcur, extents);
		if (error) {
			kfree(extents);
			break;
		}

		/*
		 * We hand the extent list to the discard function here so the
		 * discarded extents can be removed from the busy extent list.
		 * This allows the discards to run asynchronously with gathering
		 * the next round of extents to discard.
		 *
		 * However, we must ensure that we do not reference the extent
		 * list after this function call, as it may have been freed by
		 * the time control returns to us.
		 */
		error = xfs_discard_extents(pag->pag_mount, extents);
		if (error)
			break;

		if (xfs_trim_should_stop())
			break;

	} while (tcur.count != 0);

	return error;
}

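/*
 * Trim the free space within the given range of the data device, walking
 * each AG that the range overlaps.
 */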
static int
xfs_trim_datadev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_extlen_t		minlen)
{
	xfs_agnumber_t		start_agno, end_agno;
	xfs_agblock_t		start_agbno, end_agbno;
	xfs_daddr_t		ddev_end;
	struct xfs_perag	*pag;
	int			last_error = 0, error;

	ddev_end = min_t(xfs_daddr_t, end,
			 XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks) - 1);

	start_agno = xfs_daddr_to_agno(mp, start);
	start_agbno = xfs_daddr_to_agbno(mp, start);
	end_agno = xfs_daddr_to_agno(mp, ddev_end);
	end_agbno = xfs_daddr_to_agbno(mp, ddev_end);

	for_each_perag_range(mp, start_agno, end_agno, pag) {
		xfs_agblock_t	agend = pag->block_count;

		if (start_agno == end_agno)
			agend = end_agbno;
		error = xfs_trim_perag_extents(pag, start_agbno, agend, minlen);
		if (error)
			last_error = error;

		if (xfs_trim_should_stop()) {
			xfs_perag_rele(pag);
			break;
		}
		start_agbno = 0;
	}

	return last_error;
}

#ifdef CONFIG_XFS_RT
struct xfs_trim_rtdev {
	/* list of rt extents to free */
	struct list_head	extent_list;

	/* minimum length that caller allows us to trim */
	xfs_rtblock_t		minlen_fsb;

	/* restart point for the rtbitmap walk */
	xfs_rtxnum_t		restart_rtx;

	/* stopping point for the current rtbitmap walk */
	xfs_rtxnum_t		stop_rtx;
};

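/* A free realtime extent gathered from the rtbitmap, queued for discard. */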
struct xfs_rtx_busy {
	struct list_head	list;
	xfs_rtblock_t		bno;
	xfs_rtblock_t		length;
};

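/* Free all of the rt extent records gathered for this trim pass. */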
static void
xfs_discard_free_rtdev_extents(
	struct xfs_trim_rtdev	*tr)
{
	struct xfs_rtx_busy	*busyp, *n;

	list_for_each_entry_safe(busyp, n, &tr->extent_list, list) {
		list_del_init(&busyp->list);
		kfree(busyp);
	}
}

/*
 * Walk the discard list and issue discards on all the extents in the list.
 * We plug and chain the bios so that we only need to wait on a single chained
 * bio for all the discards to complete.
 */
static int
xfs_discard_rtdev_extents(
	struct xfs_mount	*mp,
	struct xfs_trim_rtdev	*tr)
{
	struct block_device	*bdev = mp->m_rtdev_targp->bt_bdev;
	struct xfs_rtx_busy	*busyp;
	struct bio		*bio = NULL;
	struct blk_plug		plug;
	xfs_rtblock_t		start = NULLRTBLOCK, length = 0;
	int			error = 0;

	blk_start_plug(&plug);
	list_for_each_entry(busyp, &tr->extent_list, list) {
		if (start == NULLRTBLOCK)
			start = busyp->bno;
		length += busyp->length;

		trace_xfs_discard_rtextent(mp, busyp->bno, busyp->length);

		error = __blkdev_issue_discard(bdev,
				XFS_FSB_TO_BB(mp, busyp->bno),
				XFS_FSB_TO_BB(mp, busyp->length),
				GFP_NOFS, &bio);
		if (error)
			break;
	}
	xfs_discard_free_rtdev_extents(tr);

	if (bio) {
		error = submit_bio_wait(bio);
		if (error == -EOPNOTSUPP)
			error = 0;
		if (error)
			xfs_info(mp,
	 "discard failed for rtextent [0x%llx,%llu], error %d",
				 (unsigned long long)start,
				 (unsigned long long)length,
				 error);
		bio_put(bio);
	}
	blk_finish_plug(&plug);

	return error;
}

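/*
 * xfs_rtalloc_query_range() callback: record a free rt extent for discard,
 * stopping the walk once we have scanned past the current batch of rtbitmap
 * blocks.
 */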
static int
xfs_trim_gather_rtextent(
	struct xfs_mount		*mp,
	struct xfs_trans		*tp,
	const struct xfs_rtalloc_rec	*rec,
	void				*priv)
{
	struct xfs_trim_rtdev		*tr = priv;
	struct xfs_rtx_busy		*busyp;
	xfs_rtblock_t			rbno, rlen;

	if (rec->ar_startext > tr->stop_rtx) {
		/*
		 * If we've scanned a large number of rtbitmap blocks, update
		 * the cursor to point at this extent so we restart the next
		 * batch from this extent.
		 */
		tr->restart_rtx = rec->ar_startext;
		return -ECANCELED;
	}

	rbno = xfs_rtx_to_rtb(mp, rec->ar_startext);
	rlen = xfs_rtx_to_rtb(mp, rec->ar_extcount);

	/* Ignore too small. */
	if (rlen < tr->minlen_fsb) {
		trace_xfs_discard_rttoosmall(mp, rbno, rlen);
		return 0;
	}

	busyp = kzalloc(sizeof(struct xfs_rtx_busy), GFP_KERNEL);
	if (!busyp)
		return -ENOMEM;

	busyp->bno = rbno;
	busyp->length = rlen;
	INIT_LIST_HEAD(&busyp->list);
	list_add_tail(&busyp->list, &tr->extent_list);

	tr->restart_rtx = rec->ar_startext + rec->ar_extcount;
	return 0;
}

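/*
 * Trim the free realtime extents within the given range, one batch of
 * rtbitmap blocks at a time.
 */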
static int
xfs_trim_rtdev_extents(
	struct xfs_mount	*mp,
	xfs_daddr_t		start,
	xfs_daddr_t		end,
	xfs_daddr_t		minlen)
{
	struct xfs_trim_rtdev	tr = {
		.minlen_fsb	= XFS_BB_TO_FSB(mp, minlen),
	};
	xfs_rtxnum_t		low, high;
	struct xfs_trans	*tp;
	xfs_daddr_t		rtdev_daddr;
	int			error;

	INIT_LIST_HEAD(&tr.extent_list);

	/* Shift the start and end downwards to match the rt device. */
	rtdev_daddr = XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks);
	if (start > rtdev_daddr)
		start -= rtdev_daddr;
	else
		start = 0;

	if (end <= rtdev_daddr)
		return 0;
	end -= rtdev_daddr;

	error = xfs_trans_alloc_empty(mp, &tp);
	if (error)
		return error;

	end = min_t(xfs_daddr_t, end,
			XFS_FSB_TO_BB(mp, mp->m_sb.sb_rblocks) - 1);

	/* Convert the rt blocks to rt extents */
	low = xfs_rtb_to_rtxup(mp, XFS_BB_TO_FSB(mp, start));
	high = xfs_rtb_to_rtx(mp, XFS_BB_TO_FSBT(mp, end));

	/*
	 * Walk the free ranges between low and high.  The query_range function
	 * trims the extents returned.
	 */
	do {
		tr.stop_rtx = low + (mp->m_sb.sb_blocksize * NBBY);
		xfs_rtbitmap_lock_shared(mp, XFS_RBMLOCK_BITMAP);
		error = xfs_rtalloc_query_range(mp, tp, low, high,
				xfs_trim_gather_rtextent, &tr);

		if (error == -ECANCELED)
			error = 0;
		if (error) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			xfs_discard_free_rtdev_extents(&tr);
			break;
		}

		if (list_empty(&tr.extent_list)) {
			xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
			break;
		}

		error = xfs_discard_rtdev_extents(mp, &tr);
		xfs_rtbitmap_unlock_shared(mp, XFS_RBMLOCK_BITMAP);
		if (error)
			break;

		low = tr.restart_rtx;
	} while (!xfs_trim_should_stop() && low <= high);

	xfs_trans_cancel(tp);
	return error;
}
#else
# define xfs_trim_rtdev_extents(...)	(-EOPNOTSUPP)
#endif /* CONFIG_XFS_RT */

/*
 * Trim a range of the filesystem.
 *
 * Note: the parameters passed from userspace are byte ranges into the
 * filesystem which do not match the format we use for filesystem block
 * addressing. FSB addressing is sparse (AGNO|AGBNO), while the incoming format
 * is a linear address range. Hence we need to use DADDR based conversions and
 * comparisons for determining the correct offset and regions to trim.
 *
 * The realtime device is mapped into the FITRIM "address space" immediately
 * after the data device.
 */
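/*
 * For reference, a minimal sketch of how userspace typically drives this path
 * through the generic FITRIM ioctl; the mount point and values shown are
 * illustrative only and error handling is omitted:
 *
 *	struct fstrim_range range = {
 *		.start	= 0,
 *		.len	= ULLONG_MAX,
 *		.minlen	= 0,
 *	};
 *	int fd = open("/mnt/xfs", O_RDONLY);
 *
 *	ioctl(fd, FITRIM, &range);
 *
 * The caller needs CAP_SYS_ADMIN, and on success the range is copied back
 * with ->len clamped to the end of the filesystem.
 */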
int
xfs_ioc_trim(
	struct xfs_mount		*mp,
	struct fstrim_range __user	*urange)
{
	unsigned int		granularity =
		bdev_discard_granularity(mp->m_ddev_targp->bt_bdev);
	struct block_device	*rt_bdev = NULL;
	struct fstrim_range	range;
	xfs_daddr_t		start, end;
	xfs_extlen_t		minlen;
	xfs_rfsblock_t		max_blocks;
	int			error, last_error = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	if (mp->m_rtdev_targp &&
	    bdev_max_discard_sectors(mp->m_rtdev_targp->bt_bdev))
		rt_bdev = mp->m_rtdev_targp->bt_bdev;
	if (!bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev) && !rt_bdev)
		return -EOPNOTSUPP;

	if (rt_bdev)
		granularity = max(granularity,
				  bdev_discard_granularity(rt_bdev));

	/*
	 * We haven't recovered the log, so we cannot use our bnobt-guided
	 * storage zapping commands.
	 */
	if (xfs_has_norecovery(mp))
		return -EROFS;

	if (copy_from_user(&range, urange, sizeof(range)))
		return -EFAULT;

	range.minlen = max_t(u64, granularity, range.minlen);
	minlen = XFS_B_TO_FSB(mp, range.minlen);

	/*
	 * Truncating down the len isn't actually quite correct, but using
	 * BBTOB would mean we trivially get overflows for values
	 * of ULLONG_MAX or slightly lower.  And ULLONG_MAX is the default
	 * used by the fstrim application.  In the end it really doesn't
	 * matter as trimming blocks is an advisory interface.
	 */
	max_blocks = mp->m_sb.sb_dblocks + mp->m_sb.sb_rblocks;
	if (range.start >= XFS_FSB_TO_B(mp, max_blocks) ||
	    range.minlen > XFS_FSB_TO_B(mp, mp->m_ag_max_usable) ||
	    range.len < mp->m_sb.sb_blocksize)
		return -EINVAL;

	start = BTOBB(range.start);
	end = start + BTOBBT(range.len) - 1;

	if (bdev_max_discard_sectors(mp->m_ddev_targp->bt_bdev)) {
		error = xfs_trim_datadev_extents(mp, start, end, minlen);
		if (error)
			last_error = error;
	}

	if (rt_bdev && !xfs_trim_should_stop()) {
		error = xfs_trim_rtdev_extents(mp, start, end, minlen);
		if (error)
			last_error = error;
	}

	if (last_error)
		return last_error;

	range.len = min_t(unsigned long long, range.len,
			  XFS_FSB_TO_B(mp, max_blocks) - range.start);
	if (copy_to_user(urange, &range, sizeof(range)))
		return -EFAULT;
	return 0;
}