1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sb.h"
13 #include "xfs_mount.h"
14 #include "xfs_inode.h"
15 #include "xfs_trans.h"
16 #include "xfs_trans_priv.h"
17 #include "xfs_inode_item.h"
18 #include "xfs_quota.h"
19 #include "xfs_trace.h"
20 #include "xfs_icache.h"
21 #include "xfs_bmap_util.h"
22 #include "xfs_dquot_item.h"
23 #include "xfs_dquot.h"
24 #include "xfs_reflink.h"
25 #include "xfs_ialloc.h"
26
27 #include <linux/iversion.h>
28
29 /*
30 * Allocate and initialise an xfs_inode.
31 */
32 struct xfs_inode *
xfs_inode_alloc(
34 struct xfs_mount *mp,
35 xfs_ino_t ino)
36 {
37 struct xfs_inode *ip;
38
39 /*
40 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
41 * and return NULL here on ENOMEM.
42 */
43 ip = kmem_cache_alloc(xfs_inode_zone, GFP_KERNEL | __GFP_NOFAIL);
44
45 if (inode_init_always(mp->m_super, VFS_I(ip))) {
46 kmem_cache_free(xfs_inode_zone, ip);
47 return NULL;
48 }
49
50 /* VFS doesn't initialise i_mode! */
51 VFS_I(ip)->i_mode = 0;
52
53 XFS_STATS_INC(mp, vn_active);
54 ASSERT(atomic_read(&ip->i_pincount) == 0);
55 ASSERT(ip->i_ino == 0);
56
57 /* initialise the xfs inode */
58 ip->i_ino = ino;
59 ip->i_mount = mp;
60 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
61 ip->i_afp = NULL;
62 ip->i_cowfp = NULL;
63 memset(&ip->i_df, 0, sizeof(ip->i_df));
64 ip->i_flags = 0;
65 ip->i_delayed_blks = 0;
66 memset(&ip->i_d, 0, sizeof(ip->i_d));
67 ip->i_sick = 0;
68 ip->i_checked = 0;
69 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
70 INIT_LIST_HEAD(&ip->i_ioend_list);
71 spin_lock_init(&ip->i_ioend_lock);
72
73 return ip;
74 }
75
76 STATIC void
xfs_inode_free_callback(
78 struct rcu_head *head)
79 {
80 struct inode *inode = container_of(head, struct inode, i_rcu);
81 struct xfs_inode *ip = XFS_I(inode);
82
83 switch (VFS_I(ip)->i_mode & S_IFMT) {
84 case S_IFREG:
85 case S_IFDIR:
86 case S_IFLNK:
87 xfs_idestroy_fork(&ip->i_df);
88 break;
89 }
90
91 if (ip->i_afp) {
92 xfs_idestroy_fork(ip->i_afp);
93 kmem_cache_free(xfs_ifork_zone, ip->i_afp);
94 }
95 if (ip->i_cowfp) {
96 xfs_idestroy_fork(ip->i_cowfp);
97 kmem_cache_free(xfs_ifork_zone, ip->i_cowfp);
98 }
99 if (ip->i_itemp) {
100 ASSERT(!test_bit(XFS_LI_IN_AIL,
101 &ip->i_itemp->ili_item.li_flags));
102 xfs_inode_item_destroy(ip);
103 ip->i_itemp = NULL;
104 }
105
106 kmem_cache_free(xfs_inode_zone, ip);
107 }
108
109 static void
__xfs_inode_free(
111 struct xfs_inode *ip)
112 {
113 /* asserts to verify all state is correct here */
114 ASSERT(atomic_read(&ip->i_pincount) == 0);
115 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
116 XFS_STATS_DEC(ip->i_mount, vn_active);
117
118 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
119 }
120
121 void
xfs_inode_free(
123 struct xfs_inode *ip)
124 {
125 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
126
127 /*
128 * Because we use RCU freeing we need to ensure the inode always
129 * appears to be reclaimed with an invalid inode number when in the
130 * free state. The ip->i_flags_lock provides the barrier against lookup
131 * races.
132 */
133 spin_lock(&ip->i_flags_lock);
134 ip->i_flags = XFS_IRECLAIM;
135 ip->i_ino = 0;
136 spin_unlock(&ip->i_flags_lock);
137
138 __xfs_inode_free(ip);
139 }
140
141 /*
142 * Queue background inode reclaim work if there are reclaimable inodes and there
143 * isn't reclaim work already scheduled or in progress.
144 */
145 static void
xfs_reclaim_work_queue(
147 struct xfs_mount *mp)
148 {
149
150 rcu_read_lock();
151 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
152 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
153 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
154 }
155 rcu_read_unlock();
156 }
157
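/*
 * Account another reclaimable inode in this perag and, on the 0 -> 1
 * transition, propagate the reclaim tag up into the perag radix tree and
 * kick the background reclaim worker. Caller must hold pag_ici_lock.
 */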
158 static void
xfs_perag_set_reclaim_tag(
160 struct xfs_perag *pag)
161 {
162 struct xfs_mount *mp = pag->pag_mount;
163
164 lockdep_assert_held(&pag->pag_ici_lock);
165 if (pag->pag_ici_reclaimable++)
166 return;
167
168 /* propagate the reclaim tag up into the perag radix tree */
169 spin_lock(&mp->m_perag_lock);
170 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
171 XFS_ICI_RECLAIM_TAG);
172 spin_unlock(&mp->m_perag_lock);
173
174 /* schedule periodic background inode reclaim */
175 xfs_reclaim_work_queue(mp);
176
177 trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
178 }
179
180 static void
xfs_perag_clear_reclaim_tag(
182 struct xfs_perag *pag)
183 {
184 struct xfs_mount *mp = pag->pag_mount;
185
186 lockdep_assert_held(&pag->pag_ici_lock);
187 if (--pag->pag_ici_reclaimable)
188 return;
189
190 /* clear the reclaim tag from the perag radix tree */
191 spin_lock(&mp->m_perag_lock);
192 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
193 XFS_ICI_RECLAIM_TAG);
194 spin_unlock(&mp->m_perag_lock);
195 trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
196 }
197
198
199 /*
200 * We set the inode flag atomically with the radix tree tag.
201 * Once we get tag lookups on the radix tree, this inode flag
202 * can go away.
203 */
204 void
xfs_inode_set_reclaim_tag(
206 struct xfs_inode *ip)
207 {
208 struct xfs_mount *mp = ip->i_mount;
209 struct xfs_perag *pag;
210
211 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
212 spin_lock(&pag->pag_ici_lock);
213 spin_lock(&ip->i_flags_lock);
214
215 radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
216 XFS_ICI_RECLAIM_TAG);
217 xfs_perag_set_reclaim_tag(pag);
218 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
219
220 spin_unlock(&ip->i_flags_lock);
221 spin_unlock(&pag->pag_ici_lock);
222 xfs_perag_put(pag);
223 }
224
225 STATIC void
xfs_inode_clear_reclaim_tag(
227 struct xfs_perag *pag,
228 xfs_ino_t ino)
229 {
230 radix_tree_tag_clear(&pag->pag_ici_root,
231 XFS_INO_TO_AGINO(pag->pag_mount, ino),
232 XFS_ICI_RECLAIM_TAG);
233 xfs_perag_clear_reclaim_tag(pag);
234 }
235
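/*
 * Wait for the XFS_INEW flag to be cleared on an inode that some other thread
 * is still initialising, sleeping on the i_flags bit waitqueue rather than
 * spinning.
 */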
236 static void
xfs_inew_wait(
238 struct xfs_inode *ip)
239 {
240 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
241 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
242
243 do {
244 prepare_to_wait(wq, &wait.wq_entry, TASK_UNINTERRUPTIBLE);
245 if (!xfs_iflags_test(ip, XFS_INEW))
246 break;
247 schedule();
248 } while (true);
249 finish_wait(wq, &wait.wq_entry);
250 }
251
252 /*
253 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
254 * part of the structure. This is made more complex by the fact we store
255 * information about the on-disk values in the VFS inode and so we can't just
256 * overwrite the values unconditionally. Hence we save the parameters we
257 * need to retain across reinitialisation, and rewrite them into the VFS inode
258 * after reinitialisation even if it fails.
259 */
260 static int
xfs_reinit_inode(
262 struct xfs_mount *mp,
263 struct inode *inode)
264 {
265 int error;
266 uint32_t nlink = inode->i_nlink;
267 uint32_t generation = inode->i_generation;
268 uint64_t version = inode_peek_iversion(inode);
269 umode_t mode = inode->i_mode;
270 dev_t dev = inode->i_rdev;
271 kuid_t uid = inode->i_uid;
272 kgid_t gid = inode->i_gid;
273
274 error = inode_init_always(mp->m_super, inode);
275
276 set_nlink(inode, nlink);
277 inode->i_generation = generation;
278 inode_set_iversion_queried(inode, version);
279 inode->i_mode = mode;
280 inode->i_rdev = dev;
281 inode->i_uid = uid;
282 inode->i_gid = gid;
283 return error;
284 }
285
286 /*
287 * If we are allocating a new inode, then check what was returned is
288 * actually a free, empty inode. If we are not allocating an inode,
289 * then check we didn't find a free inode.
290 *
291 * Returns:
292 * 0 if the inode free state matches the lookup context
293 * -ENOENT if the inode is free and we are not allocating
294 * -EFSCORRUPTED if there is any state mismatch at all
295 */
296 static int
xfs_iget_check_free_state(
298 struct xfs_inode *ip,
299 int flags)
300 {
301 if (flags & XFS_IGET_CREATE) {
302 /* should be a free inode */
303 if (VFS_I(ip)->i_mode != 0) {
304 xfs_warn(ip->i_mount,
305 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
306 ip->i_ino, VFS_I(ip)->i_mode);
307 return -EFSCORRUPTED;
308 }
309
310 if (ip->i_d.di_nblocks != 0) {
311 xfs_warn(ip->i_mount,
312 "Corruption detected! Free inode 0x%llx has blocks allocated!",
313 ip->i_ino);
314 return -EFSCORRUPTED;
315 }
316 return 0;
317 }
318
319 /* should be an allocated inode */
320 if (VFS_I(ip)->i_mode == 0)
321 return -ENOENT;
322
323 return 0;
324 }
325
/*
 * Check the validity of the inode we just found in the cache
 */
329 static int
xfs_iget_cache_hit(
331 struct xfs_perag *pag,
332 struct xfs_inode *ip,
333 xfs_ino_t ino,
334 int flags,
335 int lock_flags) __releases(RCU)
336 {
337 struct inode *inode = VFS_I(ip);
338 struct xfs_mount *mp = ip->i_mount;
339 int error;
340
341 /*
342 * check for re-use of an inode within an RCU grace period due to the
343 * radix tree nodes not being updated yet. We monitor for this by
344 * setting the inode number to zero before freeing the inode structure.
345 * If the inode has been reallocated and set up, then the inode number
346 * will not match, so check for that, too.
347 */
348 spin_lock(&ip->i_flags_lock);
349 if (ip->i_ino != ino) {
350 trace_xfs_iget_skip(ip);
351 XFS_STATS_INC(mp, xs_ig_frecycle);
352 error = -EAGAIN;
353 goto out_error;
354 }
355
356
	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 * wait_on_inode to wait for these flags to be cleared
	 * instead of polling for it.
	 */
367 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
368 trace_xfs_iget_skip(ip);
369 XFS_STATS_INC(mp, xs_ig_frecycle);
370 error = -EAGAIN;
371 goto out_error;
372 }
373
374 /*
375 * Check the inode free state is valid. This also detects lookup
376 * racing with unlinks.
377 */
378 error = xfs_iget_check_free_state(ip, flags);
379 if (error)
380 goto out_error;
381
382 /*
383 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
384 * Need to carefully get it back into useable state.
385 */
386 if (ip->i_flags & XFS_IRECLAIMABLE) {
387 trace_xfs_iget_reclaim(ip);
388
389 if (flags & XFS_IGET_INCORE) {
390 error = -EAGAIN;
391 goto out_error;
392 }
393
394 /*
395 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
396 * from stomping over us while we recycle the inode. We can't
397 * clear the radix tree reclaimable tag yet as it requires
398 * pag_ici_lock to be held exclusive.
399 */
400 ip->i_flags |= XFS_IRECLAIM;
401
402 spin_unlock(&ip->i_flags_lock);
403 rcu_read_unlock();
404
405 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
406 error = xfs_reinit_inode(mp, inode);
407 if (error) {
408 bool wake;
409 /*
410 * Re-initializing the inode failed, and we are in deep
411 * trouble. Try to re-add it to the reclaim list.
412 */
413 rcu_read_lock();
414 spin_lock(&ip->i_flags_lock);
415 wake = !!__xfs_iflags_test(ip, XFS_INEW);
416 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
417 if (wake)
418 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
419 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
420 trace_xfs_iget_reclaim_fail(ip);
421 goto out_error;
422 }
423
424 spin_lock(&pag->pag_ici_lock);
425 spin_lock(&ip->i_flags_lock);
426
427 /*
428 * Clear the per-lifetime state in the inode as we are now
429 * effectively a new inode and need to return to the initial
430 * state before reuse occurs.
431 */
432 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
433 ip->i_flags |= XFS_INEW;
434 xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
435 inode->i_state = I_NEW;
436 ip->i_sick = 0;
437 ip->i_checked = 0;
438
439 spin_unlock(&ip->i_flags_lock);
440 spin_unlock(&pag->pag_ici_lock);
441 } else {
442 /* If the VFS inode is being torn down, pause and try again. */
443 if (!igrab(inode)) {
444 trace_xfs_iget_skip(ip);
445 error = -EAGAIN;
446 goto out_error;
447 }
448
449 /* We've got a live one. */
450 spin_unlock(&ip->i_flags_lock);
451 rcu_read_unlock();
452 trace_xfs_iget_hit(ip);
453 }
454
455 if (lock_flags != 0)
456 xfs_ilock(ip, lock_flags);
457
458 if (!(flags & XFS_IGET_INCORE))
459 xfs_iflags_clear(ip, XFS_ISTALE);
460 XFS_STATS_INC(mp, xs_ig_found);
461
462 return 0;
463
464 out_error:
465 spin_unlock(&ip->i_flags_lock);
466 rcu_read_unlock();
467 return error;
468 }
469
470
471 static int
xfs_iget_cache_miss(
473 struct xfs_mount *mp,
474 struct xfs_perag *pag,
475 xfs_trans_t *tp,
476 xfs_ino_t ino,
477 struct xfs_inode **ipp,
478 int flags,
479 int lock_flags)
480 {
481 struct xfs_inode *ip;
482 int error;
483 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
484 int iflags;
485
486 ip = xfs_inode_alloc(mp, ino);
487 if (!ip)
488 return -ENOMEM;
489
490 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
491 if (error)
492 goto out_destroy;
493
494 /*
495 * For version 5 superblocks, if we are initialising a new inode and we
496 * are not utilising the XFS_MOUNT_IKEEP inode cluster mode, we can
497 * simply build the new inode core with a random generation number.
498 *
499 * For version 4 (and older) superblocks, log recovery is dependent on
500 * the di_flushiter field being initialised from the current on-disk
501 * value and hence we must also read the inode off disk even when
502 * initializing new inodes.
503 */
504 if (xfs_sb_version_has_v3inode(&mp->m_sb) &&
505 (flags & XFS_IGET_CREATE) && !(mp->m_flags & XFS_MOUNT_IKEEP)) {
506 VFS_I(ip)->i_generation = prandom_u32();
507 } else {
508 struct xfs_dinode *dip;
509 struct xfs_buf *bp;
510
511 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &dip, &bp, 0);
512 if (error)
513 goto out_destroy;
514
515 error = xfs_inode_from_disk(ip, dip);
516 if (!error)
517 xfs_buf_set_ref(bp, XFS_INO_REF);
518 xfs_trans_brelse(tp, bp);
519
520 if (error)
521 goto out_destroy;
522 }
523
524 trace_xfs_iget_miss(ip);
525
526 /*
527 * Check the inode free state is valid. This also detects lookup
528 * racing with unlinks.
529 */
530 error = xfs_iget_check_free_state(ip, flags);
531 if (error)
532 goto out_destroy;
533
534 /*
535 * Preload the radix tree so we can insert safely under the
536 * write spinlock. Note that we cannot sleep inside the preload
537 * region. Since we can be called from transaction context, don't
538 * recurse into the file system.
539 */
540 if (radix_tree_preload(GFP_NOFS)) {
541 error = -EAGAIN;
542 goto out_destroy;
543 }
544
545 /*
546 * Because the inode hasn't been added to the radix-tree yet it can't
547 * be found by another thread, so we can do the non-sleeping lock here.
548 */
549 if (lock_flags) {
550 if (!xfs_ilock_nowait(ip, lock_flags))
551 BUG();
552 }
553
554 /*
555 * These values must be set before inserting the inode into the radix
556 * tree as the moment it is inserted a concurrent lookup (allowed by the
557 * RCU locking mechanism) can find it and that lookup must see that this
558 * is an inode currently under construction (i.e. that XFS_INEW is set).
559 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
560 * memory barrier that ensures this detection works correctly at lookup
561 * time.
562 */
563 iflags = XFS_INEW;
564 if (flags & XFS_IGET_DONTCACHE)
565 d_mark_dontcache(VFS_I(ip));
566 ip->i_udquot = NULL;
567 ip->i_gdquot = NULL;
568 ip->i_pdquot = NULL;
569 xfs_iflags_set(ip, iflags);
570
571 /* insert the new inode */
572 spin_lock(&pag->pag_ici_lock);
573 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
574 if (unlikely(error)) {
575 WARN_ON(error != -EEXIST);
576 XFS_STATS_INC(mp, xs_ig_dup);
577 error = -EAGAIN;
578 goto out_preload_end;
579 }
580 spin_unlock(&pag->pag_ici_lock);
581 radix_tree_preload_end();
582
583 *ipp = ip;
584 return 0;
585
586 out_preload_end:
587 spin_unlock(&pag->pag_ici_lock);
588 radix_tree_preload_end();
589 if (lock_flags)
590 xfs_iunlock(ip, lock_flags);
591 out_destroy:
592 __destroy_inode(VFS_I(ip));
593 xfs_inode_free(ip);
594 return error;
595 }
596
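/*
 * Illustrative call-site sketch (not taken from this file) for xfs_iget()
 * below: a metadata operation that needs inode @ino locked exclusively would
 * typically do
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...modify the inode...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 *
 * i.e. the caller owns both the inode reference and whatever lock flags it
 * requested, and must drop them itself.
 */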
597 /*
598 * Look up an inode by number in the given file system. The inode is looked up
599 * in the cache held in each AG. If the inode is found in the cache, initialise
600 * the vfs inode if necessary.
601 *
602 * If it is not in core, read it in from the file system's device, add it to the
603 * cache and initialise the vfs inode.
604 *
605 * The inode is locked according to the value of the lock_flags parameter.
606 * Inode lookup is only done during metadata operations and not as part of the
607 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
608 */
609 int
xfs_iget(
611 struct xfs_mount *mp,
612 struct xfs_trans *tp,
613 xfs_ino_t ino,
614 uint flags,
615 uint lock_flags,
616 struct xfs_inode **ipp)
617 {
618 struct xfs_inode *ip;
619 struct xfs_perag *pag;
620 xfs_agino_t agino;
621 int error;
622
623 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
624
625 /* reject inode numbers outside existing AGs */
626 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
627 return -EINVAL;
628
629 XFS_STATS_INC(mp, xs_ig_attempts);
630
631 /* get the perag structure and ensure that it's inode capable */
632 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
633 agino = XFS_INO_TO_AGINO(mp, ino);
634
635 again:
636 error = 0;
637 rcu_read_lock();
638 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
639
640 if (ip) {
641 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
642 if (error)
643 goto out_error_or_again;
644 } else {
645 rcu_read_unlock();
646 if (flags & XFS_IGET_INCORE) {
647 error = -ENODATA;
648 goto out_error_or_again;
649 }
650 XFS_STATS_INC(mp, xs_ig_missed);
651
652 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
653 flags, lock_flags);
654 if (error)
655 goto out_error_or_again;
656 }
657 xfs_perag_put(pag);
658
659 *ipp = ip;
660
661 /*
662 * If we have a real type for an on-disk inode, we can setup the inode
663 * now. If it's a new inode being created, xfs_ialloc will handle it.
664 */
665 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
666 xfs_setup_existing_inode(ip);
667 return 0;
668
669 out_error_or_again:
670 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
671 delay(1);
672 goto again;
673 }
674 xfs_perag_put(pag);
675 return error;
676 }
677
678 /*
679 * "Is this a cached inode that's also allocated?"
680 *
681 * Look up an inode by number in the given file system. If the inode is
682 * in cache and isn't in purgatory, return 1 if the inode is allocated
683 * and 0 if it is not. For all other cases (not in cache, being torn
684 * down, etc.), return a negative error code.
685 *
686 * The caller has to prevent inode allocation and freeing activity,
687 * presumably by locking the AGI buffer. This is to ensure that an
688 * inode cannot transition from allocated to freed until the caller is
689 * ready to allow that. If the inode is in an intermediate state (new,
690 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
691 * inode is not in the cache, -ENOENT will be returned. The caller must
692 * deal with these scenarios appropriately.
693 *
694 * This is a specialized use case for the online scrubber; if you're
695 * reading this, you probably want xfs_iget.
696 */
697 int
xfs_icache_inode_is_allocated(
699 struct xfs_mount *mp,
700 struct xfs_trans *tp,
701 xfs_ino_t ino,
702 bool *inuse)
703 {
704 struct xfs_inode *ip;
705 int error;
706
707 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
708 if (error)
709 return error;
710
711 *inuse = !!(VFS_I(ip)->i_mode);
712 xfs_irele(ip);
713 return 0;
714 }
715
716 /*
717 * The inode lookup is done in batches to keep the amount of lock traffic and
718 * radix tree lookups to a minimum. The batch size is a trade off between
719 * lookup reduction and stack usage. This is in the reclaim path, so we can't
720 * be too greedy.
721 */
722 #define XFS_LOOKUP_BATCH 32
723
724 /*
725 * Decide if the given @ip is eligible to be a part of the inode walk, and
726 * grab it if so. Returns true if it's ready to go or false if we should just
727 * ignore it.
728 */
729 STATIC bool
xfs_inode_walk_ag_grab(
731 struct xfs_inode *ip,
732 int flags)
733 {
734 struct inode *inode = VFS_I(ip);
735 bool newinos = !!(flags & XFS_INODE_WALK_INEW_WAIT);
736
737 ASSERT(rcu_read_lock_held());
738
739 /* Check for stale RCU freed inode */
740 spin_lock(&ip->i_flags_lock);
741 if (!ip->i_ino)
742 goto out_unlock_noent;
743
744 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
745 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
746 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
747 goto out_unlock_noent;
748 spin_unlock(&ip->i_flags_lock);
749
750 /* nothing to sync during shutdown */
751 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
752 return false;
753
	/* If we can't grab the inode, it must be on its way to reclaim. */
755 if (!igrab(inode))
756 return false;
757
758 /* inode is valid */
759 return true;
760
761 out_unlock_noent:
762 spin_unlock(&ip->i_flags_lock);
763 return false;
764 }
765
766 /*
767 * For a given per-AG structure @pag, grab, @execute, and rele all incore
768 * inodes with the given radix tree @tag.
769 */
770 STATIC int
xfs_inode_walk_ag(
772 struct xfs_perag *pag,
773 int iter_flags,
774 int (*execute)(struct xfs_inode *ip, void *args),
775 void *args,
776 int tag)
777 {
778 struct xfs_mount *mp = pag->pag_mount;
779 uint32_t first_index;
780 int last_error = 0;
781 int skipped;
782 bool done;
783 int nr_found;
784
785 restart:
786 done = false;
787 skipped = 0;
788 first_index = 0;
789 nr_found = 0;
790 do {
791 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
792 int error = 0;
793 int i;
794
795 rcu_read_lock();
796
797 if (tag == XFS_ICI_NO_TAG)
798 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
799 (void **)batch, first_index,
800 XFS_LOOKUP_BATCH);
801 else
802 nr_found = radix_tree_gang_lookup_tag(
803 &pag->pag_ici_root,
804 (void **) batch, first_index,
805 XFS_LOOKUP_BATCH, tag);
806
807 if (!nr_found) {
808 rcu_read_unlock();
809 break;
810 }
811
		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr_found == 0 and the loop will be skipped.
		 */
816 for (i = 0; i < nr_found; i++) {
817 struct xfs_inode *ip = batch[i];
818
819 if (done || !xfs_inode_walk_ag_grab(ip, iter_flags))
820 batch[i] = NULL;
821
			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
834 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
835 continue;
836 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
837 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
838 done = true;
839 }
840
841 /* unlock now we've grabbed the inodes. */
842 rcu_read_unlock();
843
844 for (i = 0; i < nr_found; i++) {
845 if (!batch[i])
846 continue;
847 if ((iter_flags & XFS_INODE_WALK_INEW_WAIT) &&
848 xfs_iflags_test(batch[i], XFS_INEW))
849 xfs_inew_wait(batch[i]);
850 error = execute(batch[i], args);
851 xfs_irele(batch[i]);
852 if (error == -EAGAIN) {
853 skipped++;
854 continue;
855 }
856 if (error && last_error != -EFSCORRUPTED)
857 last_error = error;
858 }
859
860 /* bail out if the filesystem is corrupted. */
861 if (error == -EFSCORRUPTED)
862 break;
863
864 cond_resched();
865
866 } while (nr_found && !done);
867
868 if (skipped) {
869 delay(1);
870 goto restart;
871 }
872 return last_error;
873 }
874
875 /* Fetch the next (possibly tagged) per-AG structure. */
876 static inline struct xfs_perag *
xfs_inode_walk_get_perag(
878 struct xfs_mount *mp,
879 xfs_agnumber_t agno,
880 int tag)
881 {
882 if (tag == XFS_ICI_NO_TAG)
883 return xfs_perag_get(mp, agno);
884 return xfs_perag_get_tag(mp, agno, tag);
885 }
886
887 /*
888 * Call the @execute function on all incore inodes matching the radix tree
889 * @tag.
890 */
891 int
xfs_inode_walk(
893 struct xfs_mount *mp,
894 int iter_flags,
895 int (*execute)(struct xfs_inode *ip, void *args),
896 void *args,
897 int tag)
898 {
899 struct xfs_perag *pag;
900 int error = 0;
901 int last_error = 0;
902 xfs_agnumber_t ag;
903
904 ag = 0;
905 while ((pag = xfs_inode_walk_get_perag(mp, ag, tag))) {
906 ag = pag->pag_agno + 1;
907 error = xfs_inode_walk_ag(pag, iter_flags, execute, args, tag);
908 xfs_perag_put(pag);
909 if (error) {
910 last_error = error;
911 if (error == -EFSCORRUPTED)
912 break;
913 }
914 }
915 return last_error;
916 }
917
918 /*
919 * Background scanning to trim post-EOF preallocated space. This is queued
920 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
921 */
922 void
xfs_queue_eofblocks(
924 struct xfs_mount *mp)
925 {
926 rcu_read_lock();
927 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
928 queue_delayed_work(mp->m_eofblocks_workqueue,
929 &mp->m_eofblocks_work,
930 msecs_to_jiffies(xfs_eofb_secs * 1000));
931 rcu_read_unlock();
932 }
933
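/*
 * Background worker that trims post-EOF speculative preallocations and then
 * reschedules itself via xfs_queue_eofblocks(). The pass is skipped entirely
 * if we cannot take a superblock write reference (e.g. the fs is frozen).
 */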
934 void
xfs_eofblocks_worker(
936 struct work_struct *work)
937 {
938 struct xfs_mount *mp = container_of(to_delayed_work(work),
939 struct xfs_mount, m_eofblocks_work);
940
941 if (!sb_start_write_trylock(mp->m_super))
942 return;
943 xfs_icache_free_eofblocks(mp, NULL);
944 sb_end_write(mp->m_super);
945
946 xfs_queue_eofblocks(mp);
947 }
948
949 /*
950 * Background scanning to trim preallocated CoW space. This is queued
951 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
952 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
953 */
954 void
xfs_queue_cowblocks(
956 struct xfs_mount *mp)
957 {
958 rcu_read_lock();
959 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
960 queue_delayed_work(mp->m_eofblocks_workqueue,
961 &mp->m_cowblocks_work,
962 msecs_to_jiffies(xfs_cowb_secs * 1000));
963 rcu_read_unlock();
964 }
965
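/*
 * Background worker that garbage collects stale CoW preallocations, mirroring
 * xfs_eofblocks_worker() above.
 */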
966 void
xfs_cowblocks_worker(
968 struct work_struct *work)
969 {
970 struct xfs_mount *mp = container_of(to_delayed_work(work),
971 struct xfs_mount, m_cowblocks_work);
972
973 if (!sb_start_write_trylock(mp->m_super))
974 return;
975 xfs_icache_free_cowblocks(mp, NULL);
976 sb_end_write(mp->m_super);
977
978 xfs_queue_cowblocks(mp);
979 }
980
981 /*
982 * Grab the inode for reclaim exclusively.
983 *
984 * We have found this inode via a lookup under RCU, so the inode may have
985 * already been freed, or it may be in the process of being recycled by
986 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
987 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
988 * will not be set. Hence we need to check for both these flag conditions to
989 * avoid inodes that are no longer reclaim candidates.
990 *
991 * Note: checking for other state flags here, under the i_flags_lock or not, is
992 * racy and should be avoided. Those races should be resolved only after we have
993 * ensured that we are able to reclaim this inode and the world can see that we
994 * are going to reclaim it.
995 *
996 * Return true if we grabbed it, false otherwise.
997 */
998 static bool
xfs_reclaim_inode_grab(
1000 struct xfs_inode *ip)
1001 {
1002 ASSERT(rcu_read_lock_held());
1003
1004 spin_lock(&ip->i_flags_lock);
1005 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
1006 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
1007 /* not a reclaim candidate. */
1008 spin_unlock(&ip->i_flags_lock);
1009 return false;
1010 }
1011 __xfs_iflags_set(ip, XFS_IRECLAIM);
1012 spin_unlock(&ip->i_flags_lock);
1013 return true;
1014 }
1015
1016 /*
1017 * Inode reclaim is non-blocking, so the default action if progress cannot be
1018 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
1019 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
1020 * blocking anymore and hence we can wait for the inode to be able to reclaim
1021 * it.
1022 *
1023 * We do no IO here - if callers require inodes to be cleaned they must push the
1024 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
1025 * done in the background in a non-blocking manner, and enables memory reclaim
1026 * to make progress without blocking.
1027 */
1028 static void
xfs_reclaim_inode(
1030 struct xfs_inode *ip,
1031 struct xfs_perag *pag)
1032 {
1033 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
1034
1035 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
1036 goto out;
1037 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
1038 goto out_iunlock;
1039
1040 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
1041 xfs_iunpin_wait(ip);
1042 xfs_iflush_abort(ip);
1043 goto reclaim;
1044 }
1045 if (xfs_ipincount(ip))
1046 goto out_clear_flush;
1047 if (!xfs_inode_clean(ip))
1048 goto out_clear_flush;
1049
1050 xfs_iflags_clear(ip, XFS_IFLUSHING);
1051 reclaim:
1052
1053 /*
1054 * Because we use RCU freeing we need to ensure the inode always appears
1055 * to be reclaimed with an invalid inode number when in the free state.
1056 * We do this as early as possible under the ILOCK so that
1057 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
1058 * detect races with us here. By doing this, we guarantee that once
1059 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
1060 * it will see either a valid inode that will serialise correctly, or it
1061 * will see an invalid inode that it can skip.
1062 */
1063 spin_lock(&ip->i_flags_lock);
1064 ip->i_flags = XFS_IRECLAIM;
1065 ip->i_ino = 0;
1066 spin_unlock(&ip->i_flags_lock);
1067
1068 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1069
1070 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1071 /*
1072 * Remove the inode from the per-AG radix tree.
1073 *
1074 * Because radix_tree_delete won't complain even if the item was never
1075 * added to the tree assert that it's been there before to catch
1076 * problems with the inode life time early on.
1077 */
1078 spin_lock(&pag->pag_ici_lock);
1079 if (!radix_tree_delete(&pag->pag_ici_root,
1080 XFS_INO_TO_AGINO(ip->i_mount, ino)))
1081 ASSERT(0);
1082 xfs_perag_clear_reclaim_tag(pag);
1083 spin_unlock(&pag->pag_ici_lock);
1084
1085 /*
1086 * Here we do an (almost) spurious inode lock in order to coordinate
1087 * with inode cache radix tree lookups. This is because the lookup
1088 * can reference the inodes in the cache without taking references.
1089 *
1090 * We make that OK here by ensuring that we wait until the inode is
1091 * unlocked after the lookup before we go ahead and free it.
1092 */
1093 xfs_ilock(ip, XFS_ILOCK_EXCL);
1094 xfs_qm_dqdetach(ip);
1095 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1096 ASSERT(xfs_inode_clean(ip));
1097
1098 __xfs_inode_free(ip);
1099 return;
1100
1101 out_clear_flush:
1102 xfs_iflags_clear(ip, XFS_IFLUSHING);
1103 out_iunlock:
1104 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1105 out:
1106 xfs_iflags_clear(ip, XFS_IRECLAIM);
1107 }
1108
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during a filesystem unmount reclaim walk would leak all the
 * unreclaimed inodes.
 *
 * This function returns nothing; callers that want to block until all dirty
 * inodes are written back and reclaimed must loop until no AG carries the
 * reclaim tag any more (as xfs_reclaim_inodes() does).
 */
1119 static void
xfs_reclaim_inodes_ag(
1121 struct xfs_mount *mp,
1122 int *nr_to_scan)
1123 {
1124 struct xfs_perag *pag;
1125 xfs_agnumber_t ag = 0;
1126
1127 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1128 unsigned long first_index = 0;
1129 int done = 0;
1130 int nr_found = 0;
1131
1132 ag = pag->pag_agno + 1;
1133
1134 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1135 do {
1136 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1137 int i;
1138
1139 rcu_read_lock();
1140 nr_found = radix_tree_gang_lookup_tag(
1141 &pag->pag_ici_root,
1142 (void **)batch, first_index,
1143 XFS_LOOKUP_BATCH,
1144 XFS_ICI_RECLAIM_TAG);
1145 if (!nr_found) {
1146 done = 1;
1147 rcu_read_unlock();
1148 break;
1149 }
1150
			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr_found == 0 and the loop will be skipped.
			 */
1155 for (i = 0; i < nr_found; i++) {
1156 struct xfs_inode *ip = batch[i];
1157
1158 if (done || !xfs_reclaim_inode_grab(ip))
1159 batch[i] = NULL;
1160
				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
1175 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1176 pag->pag_agno)
1177 continue;
1178 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1179 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1180 done = 1;
1181 }
1182
1183 /* unlock now we've grabbed the inodes. */
1184 rcu_read_unlock();
1185
1186 for (i = 0; i < nr_found; i++) {
1187 if (batch[i])
1188 xfs_reclaim_inode(batch[i], pag);
1189 }
1190
1191 *nr_to_scan -= XFS_LOOKUP_BATCH;
1192 cond_resched();
1193 } while (nr_found && !done && *nr_to_scan > 0);
1194
1195 if (done)
1196 first_index = 0;
1197 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1198 xfs_perag_put(pag);
1199 }
1200 }
1201
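/*
 * Synchronously reclaim all reclaimable inodes: keep pushing the AIL so dirty
 * inodes get written back, then run reclaim passes until no AG carries the
 * reclaim tag any more.
 */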
1202 void
xfs_reclaim_inodes(
1204 struct xfs_mount *mp)
1205 {
1206 int nr_to_scan = INT_MAX;
1207
1208 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
1209 xfs_ail_push_all_sync(mp->m_ail);
1210 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1211 }
1212 }
1213
1214 /*
1215 * The shrinker infrastructure determines how many inodes we should scan for
1216 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
1217 * push the AIL here. We also want to proactively free up memory if we can to
1218 * minimise the amount of work memory reclaim has to do so we kick the
1219 * background reclaim if it isn't already scheduled.
1220 */
1221 long
xfs_reclaim_inodes_nr(
1223 struct xfs_mount *mp,
1224 int nr_to_scan)
1225 {
1226 /* kick background reclaimer and push the AIL */
1227 xfs_reclaim_work_queue(mp);
1228 xfs_ail_push_all(mp->m_ail);
1229
1230 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1231 return 0;
1232 }
1233
1234 /*
1235 * Return the number of reclaimable inodes in the filesystem for
1236 * the shrinker to determine how much to reclaim.
1237 */
1238 int
xfs_reclaim_inodes_count(
1240 struct xfs_mount *mp)
1241 {
1242 struct xfs_perag *pag;
1243 xfs_agnumber_t ag = 0;
1244 int reclaimable = 0;
1245
1246 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1247 ag = pag->pag_agno + 1;
1248 reclaimable += pag->pag_ici_reclaimable;
1249 xfs_perag_put(pag);
1250 }
1251 return reclaimable;
1252 }
1253
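/*
 * Return true only if the inode matches every id filter (uid/gid/prid) that
 * is enabled in @eofb; this is the normal "intersection" filtering mode.
 */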
1254 STATIC bool
xfs_inode_match_id(
1256 struct xfs_inode *ip,
1257 struct xfs_eofblocks *eofb)
1258 {
1259 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1260 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1261 return false;
1262
1263 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1264 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1265 return false;
1266
1267 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1268 ip->i_d.di_projid != eofb->eof_prid)
1269 return false;
1270
1271 return true;
1272 }
1273
1274 /*
1275 * A union-based inode filtering algorithm. Process the inode if any of the
1276 * criteria match. This is for global/internal scans only.
1277 */
1278 STATIC bool
xfs_inode_match_id_union(
1280 struct xfs_inode *ip,
1281 struct xfs_eofblocks *eofb)
1282 {
1283 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1284 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1285 return true;
1286
1287 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1288 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1289 return true;
1290
1291 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1292 ip->i_d.di_projid == eofb->eof_prid)
1293 return true;
1294
1295 return false;
1296 }
1297
1298 /*
1299 * Is this inode @ip eligible for eof/cow block reclamation, given some
1300 * filtering parameters @eofb? The inode is eligible if @eofb is null or
1301 * if the predicate functions match.
1302 */
1303 static bool
xfs_inode_matches_eofb(
1305 struct xfs_inode *ip,
1306 struct xfs_eofblocks *eofb)
1307 {
1308 bool match;
1309
1310 if (!eofb)
1311 return true;
1312
1313 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1314 match = xfs_inode_match_id_union(ip, eofb);
1315 else
1316 match = xfs_inode_match_id(ip, eofb);
1317 if (!match)
1318 return false;
1319
1320 /* skip the inode if the file size is too small */
1321 if ((eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE) &&
1322 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1323 return false;
1324
1325 return true;
1326 }
1327
1328 /*
1329 * This is a fast pass over the inode cache to try to get reclaim moving on as
1330 * many inodes as possible in a short period of time. It kicks itself every few
1331 * seconds, as well as being kicked by the inode cache shrinker when memory
1332 * goes low.
1333 */
1334 void
xfs_reclaim_worker(
1336 struct work_struct *work)
1337 {
1338 struct xfs_mount *mp = container_of(to_delayed_work(work),
1339 struct xfs_mount, m_reclaim_work);
1340 int nr_to_scan = INT_MAX;
1341
1342 xfs_reclaim_inodes_ag(mp, &nr_to_scan);
1343 xfs_reclaim_work_queue(mp);
1344 }
1345
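/*
 * Per-inode callback for the EOF blocks walk: trim post-EOF preallocations if
 * the inode is eligible and matches the filter. The IOLOCK is only trylocked;
 * if that fails we skip the inode, or return -EAGAIN on a sync scan so the
 * inode is revisited.
 */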
1346 STATIC int
xfs_inode_free_eofblocks(
1348 struct xfs_inode *ip,
1349 void *args)
1350 {
1351 struct xfs_eofblocks *eofb = args;
1352 bool wait;
1353 int ret;
1354
1355 wait = eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC);
1356
1357 if (!xfs_can_free_eofblocks(ip, false)) {
1358 /* inode could be preallocated or append-only */
1359 trace_xfs_inode_free_eofblocks_invalid(ip);
1360 xfs_inode_clear_eofblocks_tag(ip);
1361 return 0;
1362 }
1363
1364 /*
1365 * If the mapping is dirty the operation can block and wait for some
1366 * time. Unless we are waiting, skip it.
1367 */
1368 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1369 return 0;
1370
1371 if (!xfs_inode_matches_eofb(ip, eofb))
1372 return 0;
1373
1374 /*
1375 * If the caller is waiting, return -EAGAIN to keep the background
1376 * scanner moving and revisit the inode in a subsequent pass.
1377 */
1378 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1379 if (wait)
1380 return -EAGAIN;
1381 return 0;
1382 }
1383
1384 ret = xfs_free_eofblocks(ip);
1385 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1386
1387 return ret;
1388 }
1389
1390 int
xfs_icache_free_eofblocks(
1392 struct xfs_mount *mp,
1393 struct xfs_eofblocks *eofb)
1394 {
1395 return xfs_inode_walk(mp, 0, xfs_inode_free_eofblocks, eofb,
1396 XFS_ICI_EOFBLOCKS_TAG);
1397 }
1398
1399 /*
1400 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1401 * multiple quotas, we don't know exactly which quota caused an allocation
1402 * failure. We make a best effort by including each quota under low free space
1403 * conditions (less than 1% free space) in the scan.
1404 */
1405 static int
__xfs_inode_free_quota_eofblocks(
1407 struct xfs_inode *ip,
1408 int (*execute)(struct xfs_mount *mp,
1409 struct xfs_eofblocks *eofb))
1410 {
1411 int scan = 0;
1412 struct xfs_eofblocks eofb = {0};
1413 struct xfs_dquot *dq;
1414
1415 /*
1416 * Run a sync scan to increase effectiveness and use the union filter to
1417 * cover all applicable quotas in a single scan.
1418 */
1419 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1420
1421 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1422 dq = xfs_inode_dquot(ip, XFS_DQTYPE_USER);
1423 if (dq && xfs_dquot_lowsp(dq)) {
1424 eofb.eof_uid = VFS_I(ip)->i_uid;
1425 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1426 scan = 1;
1427 }
1428 }
1429
1430 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1431 dq = xfs_inode_dquot(ip, XFS_DQTYPE_GROUP);
1432 if (dq && xfs_dquot_lowsp(dq)) {
1433 eofb.eof_gid = VFS_I(ip)->i_gid;
1434 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1435 scan = 1;
1436 }
1437 }
1438
1439 if (scan)
1440 execute(ip->i_mount, &eofb);
1441
1442 return scan;
1443 }
1444
1445 int
xfs_inode_free_quota_eofblocks(
1447 struct xfs_inode *ip)
1448 {
1449 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
1450 }
1451
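/* Map a per-AG radix tree tag to the corresponding in-core inode iflag. */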
1452 static inline unsigned long
xfs_iflag_for_tag(
1454 int tag)
1455 {
1456 switch (tag) {
1457 case XFS_ICI_EOFBLOCKS_TAG:
1458 return XFS_IEOFBLOCKS;
1459 case XFS_ICI_COWBLOCKS_TAG:
1460 return XFS_ICOWBLOCKS;
1461 default:
1462 ASSERT(0);
1463 return 0;
1464 }
1465 }
1466
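/*
 * Tag an inode in the per-AG radix tree for EOF/CoW block trimming. When the
 * first inode in the AG gains the tag, propagate it to the per-mount radix
 * tree and kick the matching background worker.
 */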
1467 static void
__xfs_inode_set_blocks_tag(
1469 xfs_inode_t *ip,
1470 void (*execute)(struct xfs_mount *mp),
1471 void (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1472 int error, unsigned long caller_ip),
1473 int tag)
1474 {
1475 struct xfs_mount *mp = ip->i_mount;
1476 struct xfs_perag *pag;
1477 int tagged;
1478
1479 /*
1480 * Don't bother locking the AG and looking up in the radix trees
1481 * if we already know that we have the tag set.
1482 */
1483 if (ip->i_flags & xfs_iflag_for_tag(tag))
1484 return;
1485 spin_lock(&ip->i_flags_lock);
1486 ip->i_flags |= xfs_iflag_for_tag(tag);
1487 spin_unlock(&ip->i_flags_lock);
1488
1489 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1490 spin_lock(&pag->pag_ici_lock);
1491
1492 tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
1493 radix_tree_tag_set(&pag->pag_ici_root,
1494 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1495 if (!tagged) {
1496 /* propagate the eofblocks tag up into the perag radix tree */
1497 spin_lock(&ip->i_mount->m_perag_lock);
1498 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1499 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1500 tag);
1501 spin_unlock(&ip->i_mount->m_perag_lock);
1502
1503 /* kick off background trimming */
1504 execute(ip->i_mount);
1505
1506 set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1507 }
1508
1509 spin_unlock(&pag->pag_ici_lock);
1510 xfs_perag_put(pag);
1511 }
1512
1513 void
xfs_inode_set_eofblocks_tag(
1515 xfs_inode_t *ip)
1516 {
1517 trace_xfs_inode_set_eofblocks_tag(ip);
1518 return __xfs_inode_set_blocks_tag(ip, xfs_queue_eofblocks,
1519 trace_xfs_perag_set_eofblocks,
1520 XFS_ICI_EOFBLOCKS_TAG);
1521 }
1522
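/*
 * Remove an EOF/CoW block trimming tag from an inode, clearing the per-mount
 * tag as well once no inodes in the AG remain tagged.
 */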
1523 static void
__xfs_inode_clear_blocks_tag(
1525 xfs_inode_t *ip,
1526 void (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
1527 int error, unsigned long caller_ip),
1528 int tag)
1529 {
1530 struct xfs_mount *mp = ip->i_mount;
1531 struct xfs_perag *pag;
1532
1533 spin_lock(&ip->i_flags_lock);
1534 ip->i_flags &= ~xfs_iflag_for_tag(tag);
1535 spin_unlock(&ip->i_flags_lock);
1536
1537 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1538 spin_lock(&pag->pag_ici_lock);
1539
1540 radix_tree_tag_clear(&pag->pag_ici_root,
1541 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
1542 if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
1543 /* clear the eofblocks tag from the perag radix tree */
1544 spin_lock(&ip->i_mount->m_perag_lock);
1545 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1546 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1547 tag);
1548 spin_unlock(&ip->i_mount->m_perag_lock);
1549 clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
1550 }
1551
1552 spin_unlock(&pag->pag_ici_lock);
1553 xfs_perag_put(pag);
1554 }
1555
1556 void
xfs_inode_clear_eofblocks_tag(
1558 xfs_inode_t *ip)
1559 {
1560 trace_xfs_inode_clear_eofblocks_tag(ip);
1561 return __xfs_inode_clear_blocks_tag(ip,
1562 trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
1563 }
1564
1565 /*
1566 * Set ourselves up to free CoW blocks from this file. If it's already clean
1567 * then we can bail out quickly, but otherwise we must back off if the file
1568 * is undergoing some kind of write.
1569 */
1570 static bool
xfs_prep_free_cowblocks(
1572 struct xfs_inode *ip)
1573 {
1574 /*
1575 * Just clear the tag if we have an empty cow fork or none at all. It's
1576 * possible the inode was fully unshared since it was originally tagged.
1577 */
1578 if (!xfs_inode_has_cow_data(ip)) {
1579 trace_xfs_inode_free_cowblocks_invalid(ip);
1580 xfs_inode_clear_cowblocks_tag(ip);
1581 return false;
1582 }
1583
1584 /*
1585 * If the mapping is dirty or under writeback we cannot touch the
1586 * CoW fork. Leave it alone if we're in the midst of a directio.
1587 */
1588 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1589 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1590 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1591 atomic_read(&VFS_I(ip)->i_dio_count))
1592 return false;
1593
1594 return true;
1595 }
1596
1597 /*
1598 * Automatic CoW Reservation Freeing
1599 *
1600 * These functions automatically garbage collect leftover CoW reservations
1601 * that were made on behalf of a cowextsize hint when we start to run out
1602 * of quota or when the reservations sit around for too long. If the file
1603 * has dirty pages or is undergoing writeback, its CoW reservations will
1604 * be retained.
1605 *
1606 * The actual garbage collection piggybacks off the same code that runs
1607 * the speculative EOF preallocation garbage collector.
1608 */
1609 STATIC int
xfs_inode_free_cowblocks(
1611 struct xfs_inode *ip,
1612 void *args)
1613 {
1614 struct xfs_eofblocks *eofb = args;
1615 int ret = 0;
1616
1617 if (!xfs_prep_free_cowblocks(ip))
1618 return 0;
1619
1620 if (!xfs_inode_matches_eofb(ip, eofb))
1621 return 0;
1622
1623 /* Free the CoW blocks */
1624 xfs_ilock(ip, XFS_IOLOCK_EXCL);
1625 xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
1626
1627 /*
1628 * Check again, nobody else should be able to dirty blocks or change
1629 * the reflink iflag now that we have the first two locks held.
1630 */
1631 if (xfs_prep_free_cowblocks(ip))
1632 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1633
1634 xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
1635 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
1636
1637 return ret;
1638 }
1639
1640 int
xfs_icache_free_cowblocks(
1642 struct xfs_mount *mp,
1643 struct xfs_eofblocks *eofb)
1644 {
1645 return xfs_inode_walk(mp, 0, xfs_inode_free_cowblocks, eofb,
1646 XFS_ICI_COWBLOCKS_TAG);
1647 }
1648
1649 int
xfs_inode_free_quota_cowblocks(
1651 struct xfs_inode *ip)
1652 {
1653 return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
1654 }
1655
1656 void
xfs_inode_set_cowblocks_tag(
1658 xfs_inode_t *ip)
1659 {
1660 trace_xfs_inode_set_cowblocks_tag(ip);
1661 return __xfs_inode_set_blocks_tag(ip, xfs_queue_cowblocks,
1662 trace_xfs_perag_set_cowblocks,
1663 XFS_ICI_COWBLOCKS_TAG);
1664 }
1665
1666 void
xfs_inode_clear_cowblocks_tag(
1668 xfs_inode_t *ip)
1669 {
1670 trace_xfs_inode_clear_cowblocks_tag(ip);
1671 return __xfs_inode_clear_blocks_tag(ip,
1672 trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
1673 }
1674
1675 /* Disable post-EOF and CoW block auto-reclamation. */
1676 void
xfs_stop_block_reaping(
1678 struct xfs_mount *mp)
1679 {
1680 cancel_delayed_work_sync(&mp->m_eofblocks_work);
1681 cancel_delayed_work_sync(&mp->m_cowblocks_work);
1682 }
1683
1684 /* Enable post-EOF and CoW block auto-reclamation. */
1685 void
xfs_start_block_reaping(
1687 struct xfs_mount *mp)
1688 {
1689 xfs_queue_eofblocks(mp);
1690 xfs_queue_cowblocks(mp);
1691 }
1692