1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4 * All Rights Reserved.
5 */
6 #include "xfs.h"
7 #include "xfs_fs.h"
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_mount.h"
13 #include "xfs_inode.h"
14 #include "xfs_trans.h"
15 #include "xfs_trans_priv.h"
16 #include "xfs_inode_item.h"
17 #include "xfs_quota.h"
18 #include "xfs_trace.h"
19 #include "xfs_icache.h"
20 #include "xfs_bmap_util.h"
21 #include "xfs_dquot_item.h"
22 #include "xfs_dquot.h"
23 #include "xfs_reflink.h"
24 #include "xfs_ialloc.h"
25 #include "xfs_ag.h"
26 #include "xfs_log_priv.h"
27
28 #include <linux/iversion.h>
29
30 /* Radix tree tags for incore inode tree. */
31
32 /* inode is to be reclaimed */
33 #define XFS_ICI_RECLAIM_TAG 0
34 /* Inode has speculative preallocations (posteof or cow) to clean. */
35 #define XFS_ICI_BLOCKGC_TAG 1
36
37 /*
38 * The goal for walking incore inodes. These can correspond with incore inode
39 * radix tree tags when convenient. Avoid existing XFS_IWALK namespace.
40 */
41 enum xfs_icwalk_goal {
42 /* Goals directly associated with tagged inodes. */
43 XFS_ICWALK_BLOCKGC = XFS_ICI_BLOCKGC_TAG,
44 XFS_ICWALK_RECLAIM = XFS_ICI_RECLAIM_TAG,
45 };
46
47 static int xfs_icwalk(struct xfs_mount *mp,
48 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
49 static int xfs_icwalk_ag(struct xfs_perag *pag,
50 enum xfs_icwalk_goal goal, struct xfs_icwalk *icw);
51
52 /*
53 * Private inode cache walk flags for struct xfs_icwalk. Must not
54 * coincide with XFS_ICWALK_FLAGS_VALID.
55 */
56
57 /* Stop scanning after icw_scan_limit inodes. */
58 #define XFS_ICWALK_FLAG_SCAN_LIMIT (1U << 28)
59
60 #define XFS_ICWALK_FLAG_RECLAIM_SICK (1U << 27)
61 #define XFS_ICWALK_FLAG_UNION (1U << 26) /* union filter algorithm */
62
63 #define XFS_ICWALK_PRIVATE_FLAGS (XFS_ICWALK_FLAG_SCAN_LIMIT | \
64 XFS_ICWALK_FLAG_RECLAIM_SICK | \
65 XFS_ICWALK_FLAG_UNION)
66
67 /*
68 * Allocate and initialise an xfs_inode.
69 */
70 struct xfs_inode *
71 xfs_inode_alloc(
72 struct xfs_mount *mp,
73 xfs_ino_t ino)
74 {
75 struct xfs_inode *ip;
76
77 /*
78 * XXX: If this didn't occur in transactions, we could drop GFP_NOFAIL
79 * and return NULL here on ENOMEM.
80 */
81 ip = alloc_inode_sb(mp->m_super, xfs_inode_cache, GFP_KERNEL | __GFP_NOFAIL);
82
83 if (inode_init_always(mp->m_super, VFS_I(ip))) {
84 kmem_cache_free(xfs_inode_cache, ip);
85 return NULL;
86 }
87
88 /* VFS doesn't initialise i_mode or i_state! */
89 VFS_I(ip)->i_mode = 0;
90 VFS_I(ip)->i_state = 0;
91 mapping_set_large_folios(VFS_I(ip)->i_mapping);
92
93 XFS_STATS_INC(mp, vn_active);
94 ASSERT(atomic_read(&ip->i_pincount) == 0);
95 ASSERT(ip->i_ino == 0);
96
97 /* initialise the xfs inode */
98 ip->i_ino = ino;
99 ip->i_mount = mp;
100 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
101 ip->i_cowfp = NULL;
102 memset(&ip->i_af, 0, sizeof(ip->i_af));
103 ip->i_af.if_format = XFS_DINODE_FMT_EXTENTS;
104 memset(&ip->i_df, 0, sizeof(ip->i_df));
105 ip->i_flags = 0;
106 ip->i_delayed_blks = 0;
107 ip->i_diflags2 = mp->m_ino_geo.new_diflags2;
108 ip->i_nblocks = 0;
109 ip->i_forkoff = 0;
110 ip->i_sick = 0;
111 ip->i_checked = 0;
112 INIT_WORK(&ip->i_ioend_work, xfs_end_io);
113 INIT_LIST_HEAD(&ip->i_ioend_list);
114 spin_lock_init(&ip->i_ioend_lock);
115 ip->i_next_unlinked = NULLAGINO;
116 ip->i_prev_unlinked = NULLAGINO;
117
118 return ip;
119 }
120
121 STATIC void
122 xfs_inode_free_callback(
123 struct rcu_head *head)
124 {
125 struct inode *inode = container_of(head, struct inode, i_rcu);
126 struct xfs_inode *ip = XFS_I(inode);
127
128 switch (VFS_I(ip)->i_mode & S_IFMT) {
129 case S_IFREG:
130 case S_IFDIR:
131 case S_IFLNK:
132 xfs_idestroy_fork(&ip->i_df);
133 break;
134 }
135
136 xfs_ifork_zap_attr(ip);
137
138 if (ip->i_cowfp) {
139 xfs_idestroy_fork(ip->i_cowfp);
140 kmem_cache_free(xfs_ifork_cache, ip->i_cowfp);
141 }
142 if (ip->i_itemp) {
143 ASSERT(!test_bit(XFS_LI_IN_AIL,
144 &ip->i_itemp->ili_item.li_flags));
145 xfs_inode_item_destroy(ip);
146 ip->i_itemp = NULL;
147 }
148
149 kmem_cache_free(xfs_inode_cache, ip);
150 }
151
152 static void
153 __xfs_inode_free(
154 struct xfs_inode *ip)
155 {
156 /* asserts to verify all state is correct here */
157 ASSERT(atomic_read(&ip->i_pincount) == 0);
158 ASSERT(!ip->i_itemp || list_empty(&ip->i_itemp->ili_item.li_bio_list));
159 XFS_STATS_DEC(ip->i_mount, vn_active);
160
161 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
162 }
163
164 void
165 xfs_inode_free(
166 struct xfs_inode *ip)
167 {
168 ASSERT(!xfs_iflags_test(ip, XFS_IFLUSHING));
169
170 /*
171 * Because we use RCU freeing we need to ensure the inode always
172 * appears to be reclaimed with an invalid inode number when in the
173 * free state. The ip->i_flags_lock provides the barrier against lookup
174 * races.
175 */
176 spin_lock(&ip->i_flags_lock);
177 ip->i_flags = XFS_IRECLAIM;
178 ip->i_ino = 0;
179 spin_unlock(&ip->i_flags_lock);
180
181 __xfs_inode_free(ip);
182 }
183
184 /*
185 * Queue background inode reclaim work if there are reclaimable inodes and there
186 * isn't reclaim work already scheduled or in progress.
187 */
188 static void
189 xfs_reclaim_work_queue(
190 struct xfs_mount *mp)
191 {
192
193 rcu_read_lock();
194 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
195 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
196 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
197 }
198 rcu_read_unlock();
199 }
200
201 /*
202 * Background scanning to trim preallocated space. This is queued based on the
203 * 'speculative_prealloc_lifetime' tunable (5m by default).
204 */
205 static inline void
206 xfs_blockgc_queue(
207 struct xfs_perag *pag)
208 {
209 struct xfs_mount *mp = pag->pag_mount;
210
211 if (!xfs_is_blockgc_enabled(mp))
212 return;
213
214 rcu_read_lock();
215 if (radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_BLOCKGC_TAG))
216 queue_delayed_work(pag->pag_mount->m_blockgc_wq,
217 &pag->pag_blockgc_work,
218 msecs_to_jiffies(xfs_blockgc_secs * 1000));
219 rcu_read_unlock();
220 }
221
222 /* Set a tag on both the AG incore inode tree and the AG radix tree. */
223 static void
224 xfs_perag_set_inode_tag(
225 struct xfs_perag *pag,
226 xfs_agino_t agino,
227 unsigned int tag)
228 {
229 struct xfs_mount *mp = pag->pag_mount;
230 bool was_tagged;
231
232 lockdep_assert_held(&pag->pag_ici_lock);
233
234 was_tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
235 radix_tree_tag_set(&pag->pag_ici_root, agino, tag);
236
237 if (tag == XFS_ICI_RECLAIM_TAG)
238 pag->pag_ici_reclaimable++;
239
240 if (was_tagged)
241 return;
242
243 /* propagate the tag up into the perag radix tree */
244 spin_lock(&mp->m_perag_lock);
245 radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno, tag);
246 spin_unlock(&mp->m_perag_lock);
247
248 /* start background work */
249 switch (tag) {
250 case XFS_ICI_RECLAIM_TAG:
251 xfs_reclaim_work_queue(mp);
252 break;
253 case XFS_ICI_BLOCKGC_TAG:
254 xfs_blockgc_queue(pag);
255 break;
256 }
257
258 trace_xfs_perag_set_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
259 }
260
261 /* Clear a tag on both the AG incore inode tree and the AG radix tree. */
262 static void
263 xfs_perag_clear_inode_tag(
264 struct xfs_perag *pag,
265 xfs_agino_t agino,
266 unsigned int tag)
267 {
268 struct xfs_mount *mp = pag->pag_mount;
269
270 lockdep_assert_held(&pag->pag_ici_lock);
271
272 /*
273 * Reclaim can signal (with a null agino) that it cleared its own tag
274 * by removing the inode from the radix tree.
275 */
276 if (agino != NULLAGINO)
277 radix_tree_tag_clear(&pag->pag_ici_root, agino, tag);
278 else
279 ASSERT(tag == XFS_ICI_RECLAIM_TAG);
280
281 if (tag == XFS_ICI_RECLAIM_TAG)
282 pag->pag_ici_reclaimable--;
283
284 if (radix_tree_tagged(&pag->pag_ici_root, tag))
285 return;
286
287 /* clear the tag from the perag radix tree */
288 spin_lock(&mp->m_perag_lock);
289 radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno, tag);
290 spin_unlock(&mp->m_perag_lock);
291
292 trace_xfs_perag_clear_inode_tag(mp, pag->pag_agno, tag, _RET_IP_);
293 }
294
295 /*
296 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
297 * part of the structure. This is made more complex by the fact we store
298 * information about the on-disk values in the VFS inode and so we can't just
299 * overwrite the values unconditionally. Hence we save the parameters we
300 * need to retain across reinitialisation, and rewrite them into the VFS inode
301 * after reinitialisation even if it fails.
302 */
303 static int
304 xfs_reinit_inode(
305 struct xfs_mount *mp,
306 struct inode *inode)
307 {
308 int error;
309 uint32_t nlink = inode->i_nlink;
310 uint32_t generation = inode->i_generation;
311 uint64_t version = inode_peek_iversion(inode);
312 umode_t mode = inode->i_mode;
313 dev_t dev = inode->i_rdev;
314 kuid_t uid = inode->i_uid;
315 kgid_t gid = inode->i_gid;
316
317 error = inode_init_always(mp->m_super, inode);
318
319 set_nlink(inode, nlink);
320 inode->i_generation = generation;
321 inode_set_iversion_queried(inode, version);
322 inode->i_mode = mode;
323 inode->i_rdev = dev;
324 inode->i_uid = uid;
325 inode->i_gid = gid;
326 mapping_set_large_folios(inode->i_mapping);
327 return error;
328 }
329
330 /*
331 * Carefully nudge an inode whose VFS state has been torn down back into a
332 * usable state. Drops the i_flags_lock and the rcu read lock.
333 */
334 static int
335 xfs_iget_recycle(
336 struct xfs_perag *pag,
337 struct xfs_inode *ip) __releases(&ip->i_flags_lock)
338 {
339 struct xfs_mount *mp = ip->i_mount;
340 struct inode *inode = VFS_I(ip);
341 int error;
342
343 trace_xfs_iget_recycle(ip);
344
345 /*
346 * We need to make it look like the inode is being reclaimed to prevent
347 * the actual reclaim workers from stomping over us while we recycle
348 * the inode. We can't clear the radix tree tag yet as it requires
349 * pag_ici_lock to be held exclusive.
350 */
351 ip->i_flags |= XFS_IRECLAIM;
352
353 spin_unlock(&ip->i_flags_lock);
354 rcu_read_unlock();
355
356 ASSERT(!rwsem_is_locked(&inode->i_rwsem));
357 error = xfs_reinit_inode(mp, inode);
358 if (error) {
359 /*
360 * Re-initializing the inode failed, and we are in deep
361 * trouble. Try to re-add it to the reclaim list.
362 */
363 rcu_read_lock();
364 spin_lock(&ip->i_flags_lock);
365 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
366 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
367 spin_unlock(&ip->i_flags_lock);
368 rcu_read_unlock();
369
370 trace_xfs_iget_recycle_fail(ip);
371 return error;
372 }
373
374 spin_lock(&pag->pag_ici_lock);
375 spin_lock(&ip->i_flags_lock);
376
377 /*
378 * Clear the per-lifetime state in the inode as we are now effectively
379 * a new inode and need to return to the initial state before reuse
380 * occurs.
381 */
382 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
383 ip->i_flags |= XFS_INEW;
384 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
385 XFS_ICI_RECLAIM_TAG);
386 inode->i_state = I_NEW;
387 spin_unlock(&ip->i_flags_lock);
388 spin_unlock(&pag->pag_ici_lock);
389
390 return 0;
391 }
392
393 /*
394 * If we are allocating a new inode, then check that what was returned is
395 * actually a free, empty inode. If we are not allocating an inode,
396 * then check we didn't find a free inode.
397 *
398 * Returns:
399 * 0 if the inode free state matches the lookup context
400 * -ENOENT if the inode is free and we are not allocating
401 * -EFSCORRUPTED if there is any state mismatch at all
402 */
403 static int
404 xfs_iget_check_free_state(
405 struct xfs_inode *ip,
406 int flags)
407 {
408 if (flags & XFS_IGET_CREATE) {
409 /* should be a free inode */
410 if (VFS_I(ip)->i_mode != 0) {
411 xfs_warn(ip->i_mount,
412 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
413 ip->i_ino, VFS_I(ip)->i_mode);
414 return -EFSCORRUPTED;
415 }
416
417 if (ip->i_nblocks != 0) {
418 xfs_warn(ip->i_mount,
419 "Corruption detected! Free inode 0x%llx has blocks allocated!",
420 ip->i_ino);
421 return -EFSCORRUPTED;
422 }
423 return 0;
424 }
425
426 /* should be an allocated inode */
427 if (VFS_I(ip)->i_mode == 0)
428 return -ENOENT;
429
430 return 0;
431 }
432
433 /* Make all pending inactivation work start immediately. */
434 static bool
435 xfs_inodegc_queue_all(
436 struct xfs_mount *mp)
437 {
438 struct xfs_inodegc *gc;
439 int cpu;
440 bool ret = false;
441
442 for_each_online_cpu(cpu) {
443 gc = per_cpu_ptr(mp->m_inodegc, cpu);
444 if (!llist_empty(&gc->list)) {
445 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
446 ret = true;
447 }
448 }
449
450 return ret;
451 }
452
453 /*
454 * Check the validity of the inode we just found in the cache
455 */
456 static int
457 xfs_iget_cache_hit(
458 struct xfs_perag *pag,
459 struct xfs_inode *ip,
460 xfs_ino_t ino,
461 int flags,
462 int lock_flags) __releases(RCU)
463 {
464 struct inode *inode = VFS_I(ip);
465 struct xfs_mount *mp = ip->i_mount;
466 int error;
467
468 /*
469 * check for re-use of an inode within an RCU grace period due to the
470 * radix tree nodes not being updated yet. We monitor for this by
471 * setting the inode number to zero before freeing the inode structure.
472 * If the inode has been reallocated and set up, then the inode number
473 * will not match, so check for that, too.
474 */
475 spin_lock(&ip->i_flags_lock);
476 if (ip->i_ino != ino)
477 goto out_skip;
478
479 /*
480 * If we are racing with another cache hit that is currently
481 * instantiating this inode or currently recycling it out of
482 * reclaimable state, wait for the initialisation to complete
483 * before continuing.
484 *
485 * If we're racing with the inactivation worker we also want to wait.
486 * If we're creating a new file, it's possible that the worker
487 * previously marked the inode as free on disk but hasn't finished
488 * updating the incore state yet. The AGI buffer will be dirty and
489 * locked to the icreate transaction, so a synchronous push of the
490 * inodegc workers would result in deadlock. For a regular iget, the
491 * worker is running already, so we might as well wait.
492 *
493 * XXX(hch): eventually we should do something equivalent to
494 * wait_on_inode to wait for these flags to be cleared
495 * instead of polling for it.
496 */
497 if (ip->i_flags & (XFS_INEW | XFS_IRECLAIM | XFS_INACTIVATING))
498 goto out_skip;
499
500 if (ip->i_flags & XFS_NEED_INACTIVE) {
501 /* Unlinked inodes cannot be re-grabbed. */
502 if (VFS_I(ip)->i_nlink == 0) {
503 error = -ENOENT;
504 goto out_error;
505 }
506 goto out_inodegc_flush;
507 }
508
509 /*
510 * Check the inode free state is valid. This also detects lookup
511 * racing with unlinks.
512 */
513 error = xfs_iget_check_free_state(ip, flags);
514 if (error)
515 goto out_error;
516
517 /* Skip inodes that have no vfs state. */
518 if ((flags & XFS_IGET_INCORE) &&
519 (ip->i_flags & XFS_IRECLAIMABLE))
520 goto out_skip;
521
522 /* The inode fits the selection criteria; process it. */
523 if (ip->i_flags & XFS_IRECLAIMABLE) {
524 /* Drops i_flags_lock and RCU read lock. */
525 error = xfs_iget_recycle(pag, ip);
526 if (error)
527 return error;
528 } else {
529 /* If the VFS inode is being torn down, pause and try again. */
530 if (!igrab(inode))
531 goto out_skip;
532
533 /* We've got a live one. */
534 spin_unlock(&ip->i_flags_lock);
535 rcu_read_unlock();
536 trace_xfs_iget_hit(ip);
537 }
538
539 if (lock_flags != 0)
540 xfs_ilock(ip, lock_flags);
541
542 if (!(flags & XFS_IGET_INCORE))
543 xfs_iflags_clear(ip, XFS_ISTALE);
544 XFS_STATS_INC(mp, xs_ig_found);
545
546 return 0;
547
548 out_skip:
549 trace_xfs_iget_skip(ip);
550 XFS_STATS_INC(mp, xs_ig_frecycle);
551 error = -EAGAIN;
552 out_error:
553 spin_unlock(&ip->i_flags_lock);
554 rcu_read_unlock();
555 return error;
556
557 out_inodegc_flush:
558 spin_unlock(&ip->i_flags_lock);
559 rcu_read_unlock();
560 /*
561 * Do not wait for the workers, because the caller could hold an AGI
562 * buffer lock. We're just going to sleep in a loop anyway.
563 */
564 if (xfs_is_inodegc_enabled(mp))
565 xfs_inodegc_queue_all(mp);
566 return -EAGAIN;
567 }
568
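/*
 * The cache lookup missed: allocate a new incore inode, read or initialise
 * its on-disk metadata, and insert it into the per-AG radix tree. Returns
 * -EAGAIN if another thread won the race and inserted the inode first.
 */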
569 static int
570 xfs_iget_cache_miss(
571 struct xfs_mount *mp,
572 struct xfs_perag *pag,
573 xfs_trans_t *tp,
574 xfs_ino_t ino,
575 struct xfs_inode **ipp,
576 int flags,
577 int lock_flags)
578 {
579 struct xfs_inode *ip;
580 int error;
581 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
582 int iflags;
583
584 ip = xfs_inode_alloc(mp, ino);
585 if (!ip)
586 return -ENOMEM;
587
588 error = xfs_imap(mp, tp, ip->i_ino, &ip->i_imap, flags);
589 if (error)
590 goto out_destroy;
591
592 /*
593 * For version 5 superblocks, if we are initialising a new inode and we
594 * are not utilising the XFS_FEAT_IKEEP inode cluster mode, we can
595 * simply build the new inode core with a random generation number.
596 *
597 * For version 4 (and older) superblocks, log recovery is dependent on
598 * the i_flushiter field being initialised from the current on-disk
599 * value and hence we must also read the inode off disk even when
600 * initializing new inodes.
601 */
602 if (xfs_has_v3inodes(mp) &&
603 (flags & XFS_IGET_CREATE) && !xfs_has_ikeep(mp)) {
604 VFS_I(ip)->i_generation = get_random_u32();
605 } else {
606 struct xfs_buf *bp;
607
608 error = xfs_imap_to_bp(mp, tp, &ip->i_imap, &bp);
609 if (error)
610 goto out_destroy;
611
612 error = xfs_inode_from_disk(ip,
613 xfs_buf_offset(bp, ip->i_imap.im_boffset));
614 if (!error)
615 xfs_buf_set_ref(bp, XFS_INO_REF);
616 xfs_trans_brelse(tp, bp);
617
618 if (error)
619 goto out_destroy;
620 }
621
622 trace_xfs_iget_miss(ip);
623
624 /*
625 * Check the inode free state is valid. This also detects lookup
626 * racing with unlinks.
627 */
628 error = xfs_iget_check_free_state(ip, flags);
629 if (error)
630 goto out_destroy;
631
632 /*
633 * Preload the radix tree so we can insert safely under the
634 * write spinlock. Note that we cannot sleep inside the preload
635 * region. Since we can be called from transaction context, don't
636 * recurse into the file system.
637 */
638 if (radix_tree_preload(GFP_NOFS)) {
639 error = -EAGAIN;
640 goto out_destroy;
641 }
642
643 /*
644 * Because the inode hasn't been added to the radix-tree yet it can't
645 * be found by another thread, so we can do the non-sleeping lock here.
646 */
647 if (lock_flags) {
648 if (!xfs_ilock_nowait(ip, lock_flags))
649 BUG();
650 }
651
652 /*
653 * These values must be set before inserting the inode into the radix
654 * tree as the moment it is inserted a concurrent lookup (allowed by the
655 * RCU locking mechanism) can find it and that lookup must see that this
656 * is an inode currently under construction (i.e. that XFS_INEW is set).
657 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
658 * memory barrier that ensures this detection works correctly at lookup
659 * time.
660 */
661 iflags = XFS_INEW;
662 if (flags & XFS_IGET_DONTCACHE)
663 d_mark_dontcache(VFS_I(ip));
664 ip->i_udquot = NULL;
665 ip->i_gdquot = NULL;
666 ip->i_pdquot = NULL;
667 xfs_iflags_set(ip, iflags);
668
669 /* insert the new inode */
670 spin_lock(&pag->pag_ici_lock);
671 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
672 if (unlikely(error)) {
673 WARN_ON(error != -EEXIST);
674 XFS_STATS_INC(mp, xs_ig_dup);
675 error = -EAGAIN;
676 goto out_preload_end;
677 }
678 spin_unlock(&pag->pag_ici_lock);
679 radix_tree_preload_end();
680
681 *ipp = ip;
682 return 0;
683
684 out_preload_end:
685 spin_unlock(&pag->pag_ici_lock);
686 radix_tree_preload_end();
687 if (lock_flags)
688 xfs_iunlock(ip, lock_flags);
689 out_destroy:
690 __destroy_inode(VFS_I(ip));
691 xfs_inode_free(ip);
692 return error;
693 }
694
695 /*
696 * Look up an inode by number in the given file system. The inode is looked up
697 * in the cache held in each AG. If the inode is found in the cache, initialise
698 * the vfs inode if necessary.
699 *
700 * If it is not in core, read it in from the file system's device, add it to the
701 * cache and initialise the vfs inode.
702 *
703 * The inode is locked according to the value of the lock_flags parameter.
704 * Inode lookup is only done during metadata operations and not as part of the
705 * data IO path. Hence we only allow locking of the XFS_ILOCK during lookup.
706 */
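/*
 * Illustrative call pattern (a sketch, not code from this file): a metadata
 * operation that already owns a transaction might do
 *
 *	error = xfs_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
 *	if (error)
 *		return error;
 *	...
 *	xfs_iunlock(ip, XFS_ILOCK_EXCL);
 *	xfs_irele(ip);
 */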
707 int
708 xfs_iget(
709 struct xfs_mount *mp,
710 struct xfs_trans *tp,
711 xfs_ino_t ino,
712 uint flags,
713 uint lock_flags,
714 struct xfs_inode **ipp)
715 {
716 struct xfs_inode *ip;
717 struct xfs_perag *pag;
718 xfs_agino_t agino;
719 int error;
720
721 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
722
723 /* reject inode numbers outside existing AGs */
724 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
725 return -EINVAL;
726
727 XFS_STATS_INC(mp, xs_ig_attempts);
728
729 /* get the perag structure and ensure that it's inode capable */
730 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
731 agino = XFS_INO_TO_AGINO(mp, ino);
732
733 again:
734 error = 0;
735 rcu_read_lock();
736 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
737
738 if (ip) {
739 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
740 if (error)
741 goto out_error_or_again;
742 } else {
743 rcu_read_unlock();
744 if (flags & XFS_IGET_INCORE) {
745 error = -ENODATA;
746 goto out_error_or_again;
747 }
748 XFS_STATS_INC(mp, xs_ig_missed);
749
750 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
751 flags, lock_flags);
752 if (error)
753 goto out_error_or_again;
754 }
755 xfs_perag_put(pag);
756
757 *ipp = ip;
758
759 /*
760 * If we have a real type for an on-disk inode, we can setup the inode
761 * now. If it's a new inode being created, xfs_init_new_inode will
762 * handle it.
763 */
764 if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
765 xfs_setup_existing_inode(ip);
766 return 0;
767
768 out_error_or_again:
769 if (!(flags & XFS_IGET_INCORE) && error == -EAGAIN) {
770 delay(1);
771 goto again;
772 }
773 xfs_perag_put(pag);
774 return error;
775 }
776
777 /*
778 * "Is this a cached inode that's also allocated?"
779 *
780 * Look up an inode by number in the given file system. If the inode is
781 * in cache and isn't in purgatory, return 1 if the inode is allocated
782 * and 0 if it is not. For all other cases (not in cache, being torn
783 * down, etc.), return a negative error code.
784 *
785 * The caller has to prevent inode allocation and freeing activity,
786 * presumably by locking the AGI buffer. This is to ensure that an
787 * inode cannot transition from allocated to freed until the caller is
788 * ready to allow that. If the inode is in an intermediate state (new,
789 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
790 * inode is not in the cache, -ENOENT will be returned. The caller must
791 * deal with these scenarios appropriately.
792 *
793 * This is a specialized use case for the online scrubber; if you're
794 * reading this, you probably want xfs_iget.
795 */
796 int
797 xfs_icache_inode_is_allocated(
798 struct xfs_mount *mp,
799 struct xfs_trans *tp,
800 xfs_ino_t ino,
801 bool *inuse)
802 {
803 struct xfs_inode *ip;
804 int error;
805
806 error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
807 if (error)
808 return error;
809
810 *inuse = !!(VFS_I(ip)->i_mode);
811 xfs_irele(ip);
812 return 0;
813 }
814
815 /*
816 * Grab the inode for reclaim exclusively.
817 *
818 * We have found this inode via a lookup under RCU, so the inode may have
819 * already been freed, or it may be in the process of being recycled by
820 * xfs_iget(). In both cases, the inode will have XFS_IRECLAIM set. If the inode
821 * has been fully recycled by the time we get the i_flags_lock, XFS_IRECLAIMABLE
822 * will not be set. Hence we need to check for both these flag conditions to
823 * avoid inodes that are no longer reclaim candidates.
824 *
825 * Note: checking for other state flags here, under the i_flags_lock or not, is
826 * racy and should be avoided. Those races should be resolved only after we have
827 * ensured that we are able to reclaim this inode and the world can see that we
828 * are going to reclaim it.
829 *
830 * Return true if we grabbed it, false otherwise.
831 */
832 static bool
833 xfs_reclaim_igrab(
834 struct xfs_inode *ip,
835 struct xfs_icwalk *icw)
836 {
837 ASSERT(rcu_read_lock_held());
838
839 spin_lock(&ip->i_flags_lock);
840 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
841 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
842 /* not a reclaim candidate. */
843 spin_unlock(&ip->i_flags_lock);
844 return false;
845 }
846
847 /* Don't reclaim a sick inode unless the caller asked for it. */
848 if (ip->i_sick &&
849 (!icw || !(icw->icw_flags & XFS_ICWALK_FLAG_RECLAIM_SICK))) {
850 spin_unlock(&ip->i_flags_lock);
851 return false;
852 }
853
854 __xfs_iflags_set(ip, XFS_IRECLAIM);
855 spin_unlock(&ip->i_flags_lock);
856 return true;
857 }
858
859 /*
860 * Inode reclaim is non-blocking, so the default action if progress cannot be
861 * made is to "requeue" the inode for reclaim by unlocking it and clearing the
862 * XFS_IRECLAIM flag. If we are in a shutdown state, we don't care about
863 * blocking anymore and hence we can block until the inode can be
864 * reclaimed.
865 *
866 * We do no IO here - if callers require inodes to be cleaned they must push the
867 * AIL first to trigger writeback of dirty inodes. This enables writeback to be
868 * done in the background in a non-blocking manner, and enables memory reclaim
869 * to make progress without blocking.
870 */
871 static void
872 xfs_reclaim_inode(
873 struct xfs_inode *ip,
874 struct xfs_perag *pag)
875 {
876 xfs_ino_t ino = ip->i_ino; /* for radix_tree_delete */
877
878 if (!xfs_ilock_nowait(ip, XFS_ILOCK_EXCL))
879 goto out;
880 if (xfs_iflags_test_and_set(ip, XFS_IFLUSHING))
881 goto out_iunlock;
882
883 /*
884 * Check for log shutdown because aborting the inode can move the log
885 * tail and corrupt in memory state. This is fine if the log is shut
886 * down, but if the log is still active and only the mount is shut down
887 * then the in-memory log tail movement caused by the abort can be
888 * incorrectly propagated to disk.
889 */
890 if (xlog_is_shutdown(ip->i_mount->m_log)) {
891 xfs_iunpin_wait(ip);
892 xfs_iflush_shutdown_abort(ip);
893 goto reclaim;
894 }
895 if (xfs_ipincount(ip))
896 goto out_clear_flush;
897 if (!xfs_inode_clean(ip))
898 goto out_clear_flush;
899
900 xfs_iflags_clear(ip, XFS_IFLUSHING);
901 reclaim:
902 trace_xfs_inode_reclaiming(ip);
903
904 /*
905 * Because we use RCU freeing we need to ensure the inode always appears
906 * to be reclaimed with an invalid inode number when in the free state.
907 * We do this as early as possible under the ILOCK so that
908 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
909 * detect races with us here. By doing this, we guarantee that once
910 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
911 * it will see either a valid inode that will serialise correctly, or it
912 * will see an invalid inode that it can skip.
913 */
914 spin_lock(&ip->i_flags_lock);
915 ip->i_flags = XFS_IRECLAIM;
916 ip->i_ino = 0;
917 ip->i_sick = 0;
918 ip->i_checked = 0;
919 spin_unlock(&ip->i_flags_lock);
920
921 ASSERT(!ip->i_itemp || ip->i_itemp->ili_item.li_buf == NULL);
922 xfs_iunlock(ip, XFS_ILOCK_EXCL);
923
924 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
925 /*
926 * Remove the inode from the per-AG radix tree.
927 *
928 * Because radix_tree_delete won't complain even if the item was never
929 * added to the tree, assert that it's been there before to catch
930 * problems with the inode lifetime early on.
931 */
932 spin_lock(&pag->pag_ici_lock);
933 if (!radix_tree_delete(&pag->pag_ici_root,
934 XFS_INO_TO_AGINO(ip->i_mount, ino)))
935 ASSERT(0);
936 xfs_perag_clear_inode_tag(pag, NULLAGINO, XFS_ICI_RECLAIM_TAG);
937 spin_unlock(&pag->pag_ici_lock);
938
939 /*
940 * Here we do an (almost) spurious inode lock in order to coordinate
941 * with inode cache radix tree lookups. This is because the lookup
942 * can reference the inodes in the cache without taking references.
943 *
944 * We make that OK here by ensuring that we wait until the inode is
945 * unlocked after the lookup before we go ahead and free it.
946 */
947 xfs_ilock(ip, XFS_ILOCK_EXCL);
948 ASSERT(!ip->i_udquot && !ip->i_gdquot && !ip->i_pdquot);
949 xfs_iunlock(ip, XFS_ILOCK_EXCL);
950 ASSERT(xfs_inode_clean(ip));
951
952 __xfs_inode_free(ip);
953 return;
954
955 out_clear_flush:
956 xfs_iflags_clear(ip, XFS_IFLUSHING);
957 out_iunlock:
958 xfs_iunlock(ip, XFS_ILOCK_EXCL);
959 out:
960 xfs_iflags_clear(ip, XFS_IRECLAIM);
961 }
962
963 /* Reclaim sick inodes if we're unmounting or the fs went down. */
964 static inline bool
965 xfs_want_reclaim_sick(
966 struct xfs_mount *mp)
967 {
968 return xfs_is_unmounting(mp) || xfs_has_norecovery(mp) ||
969 xfs_is_shutdown(mp);
970 }
971
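/*
 * Synchronously reclaim all reclaimable inodes, pushing the AIL first so that
 * dirty inodes get written back. Used on paths such as unmount that must
 * empty the cache.
 */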
972 void
973 xfs_reclaim_inodes(
974 struct xfs_mount *mp)
975 {
976 struct xfs_icwalk icw = {
977 .icw_flags = 0,
978 };
979
980 if (xfs_want_reclaim_sick(mp))
981 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
982
983 while (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
984 xfs_ail_push_all_sync(mp->m_ail);
985 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
986 }
987 }
988
989 /*
990 * The shrinker infrastructure determines how many inodes we should scan for
991 * reclaim. We want as many clean inodes ready to reclaim as possible, so we
992 * push the AIL here. We also want to proactively free up memory if we can to
993 * minimise the amount of work memory reclaim has to do so we kick the
994 * background reclaim if it isn't already scheduled.
995 */
996 long
997 xfs_reclaim_inodes_nr(
998 struct xfs_mount *mp,
999 unsigned long nr_to_scan)
1000 {
1001 struct xfs_icwalk icw = {
1002 .icw_flags = XFS_ICWALK_FLAG_SCAN_LIMIT,
1003 .icw_scan_limit = min_t(unsigned long, LONG_MAX, nr_to_scan),
1004 };
1005
1006 if (xfs_want_reclaim_sick(mp))
1007 icw.icw_flags |= XFS_ICWALK_FLAG_RECLAIM_SICK;
1008
1009 /* kick background reclaimer and push the AIL */
1010 xfs_reclaim_work_queue(mp);
1011 xfs_ail_push_all(mp->m_ail);
1012
1013 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, &icw);
1014 return 0;
1015 }
1016
1017 /*
1018 * Return the number of reclaimable inodes in the filesystem for
1019 * the shrinker to determine how much to reclaim.
1020 */
1021 long
1022 xfs_reclaim_inodes_count(
1023 struct xfs_mount *mp)
1024 {
1025 struct xfs_perag *pag;
1026 xfs_agnumber_t ag = 0;
1027 long reclaimable = 0;
1028
1029 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1030 ag = pag->pag_agno + 1;
1031 reclaimable += pag->pag_ici_reclaimable;
1032 xfs_perag_put(pag);
1033 }
1034 return reclaimable;
1035 }
1036
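/*
 * Intersection-based id filter: the inode matches only if every uid/gid/prid
 * criterion present in @icw agrees with the inode's owners.
 */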
1037 STATIC bool
1038 xfs_icwalk_match_id(
1039 struct xfs_inode *ip,
1040 struct xfs_icwalk *icw)
1041 {
1042 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1043 !uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1044 return false;
1045
1046 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1047 !gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1048 return false;
1049
1050 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1051 ip->i_projid != icw->icw_prid)
1052 return false;
1053
1054 return true;
1055 }
1056
1057 /*
1058 * A union-based inode filtering algorithm. Process the inode if any of the
1059 * criteria match. This is for global/internal scans only.
1060 */
1061 STATIC bool
1062 xfs_icwalk_match_id_union(
1063 struct xfs_inode *ip,
1064 struct xfs_icwalk *icw)
1065 {
1066 if ((icw->icw_flags & XFS_ICWALK_FLAG_UID) &&
1067 uid_eq(VFS_I(ip)->i_uid, icw->icw_uid))
1068 return true;
1069
1070 if ((icw->icw_flags & XFS_ICWALK_FLAG_GID) &&
1071 gid_eq(VFS_I(ip)->i_gid, icw->icw_gid))
1072 return true;
1073
1074 if ((icw->icw_flags & XFS_ICWALK_FLAG_PRID) &&
1075 ip->i_projid == icw->icw_prid)
1076 return true;
1077
1078 return false;
1079 }
1080
1081 /*
1082 * Is this inode @ip eligible for eof/cow block reclamation, given some
1083 * filtering parameters @icw? The inode is eligible if @icw is null or
1084 * if the predicate functions match.
1085 */
1086 static bool
1087 xfs_icwalk_match(
1088 struct xfs_inode *ip,
1089 struct xfs_icwalk *icw)
1090 {
1091 bool match;
1092
1093 if (!icw)
1094 return true;
1095
1096 if (icw->icw_flags & XFS_ICWALK_FLAG_UNION)
1097 match = xfs_icwalk_match_id_union(ip, icw);
1098 else
1099 match = xfs_icwalk_match_id(ip, icw);
1100 if (!match)
1101 return false;
1102
1103 /* skip the inode if the file size is too small */
1104 if ((icw->icw_flags & XFS_ICWALK_FLAG_MINFILESIZE) &&
1105 XFS_ISIZE(ip) < icw->icw_min_file_size)
1106 return false;
1107
1108 return true;
1109 }
1110
1111 /*
1112 * This is a fast pass over the inode cache to try to get reclaim moving on as
1113 * many inodes as possible in a short period of time. It kicks itself every few
1114 * seconds, as well as being kicked by the inode cache shrinker when memory
1115 * goes low.
1116 */
1117 void
1118 xfs_reclaim_worker(
1119 struct work_struct *work)
1120 {
1121 struct xfs_mount *mp = container_of(to_delayed_work(work),
1122 struct xfs_mount, m_reclaim_work);
1123
1124 xfs_icwalk(mp, XFS_ICWALK_RECLAIM, NULL);
1125 xfs_reclaim_work_queue(mp);
1126 }
1127
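/*
 * Try to free post-EOF speculative preallocations on this inode as part of a
 * blockgc scan. Returns -EAGAIN if a synchronous scan could not take the
 * IOLOCK and should revisit the inode later.
 */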
1128 STATIC int
1129 xfs_inode_free_eofblocks(
1130 struct xfs_inode *ip,
1131 struct xfs_icwalk *icw,
1132 unsigned int *lockflags)
1133 {
1134 bool wait;
1135
1136 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1137
1138 if (!xfs_iflags_test(ip, XFS_IEOFBLOCKS))
1139 return 0;
1140
1141 /*
1142 * If the mapping is dirty the operation can block and wait for some
1143 * time. Unless we are waiting, skip it.
1144 */
1145 if (!wait && mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1146 return 0;
1147
1148 if (!xfs_icwalk_match(ip, icw))
1149 return 0;
1150
1151 /*
1152 * If the caller is waiting, return -EAGAIN to keep the background
1153 * scanner moving and revisit the inode in a subsequent pass.
1154 */
1155 if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1156 if (wait)
1157 return -EAGAIN;
1158 return 0;
1159 }
1160 *lockflags |= XFS_IOLOCK_EXCL;
1161
1162 if (xfs_can_free_eofblocks(ip, false))
1163 return xfs_free_eofblocks(ip);
1164
1165 /* inode could be preallocated or append-only */
1166 trace_xfs_inode_free_eofblocks_invalid(ip);
1167 xfs_inode_clear_eofblocks_tag(ip);
1168 return 0;
1169 }
1170
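/*
 * Set a speculative preallocation inode flag (XFS_IEOFBLOCKS or
 * XFS_ICOWBLOCKS) and tag the inode in the per-AG tree so the blockgc worker
 * will eventually visit it.
 */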
1171 static void
1172 xfs_blockgc_set_iflag(
1173 struct xfs_inode *ip,
1174 unsigned long iflag)
1175 {
1176 struct xfs_mount *mp = ip->i_mount;
1177 struct xfs_perag *pag;
1178
1179 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1180
1181 /*
1182 * Don't bother locking the AG and looking up in the radix trees
1183 * if we already know that we have the tag set.
1184 */
1185 if (ip->i_flags & iflag)
1186 return;
1187 spin_lock(&ip->i_flags_lock);
1188 ip->i_flags |= iflag;
1189 spin_unlock(&ip->i_flags_lock);
1190
1191 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1192 spin_lock(&pag->pag_ici_lock);
1193
1194 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1195 XFS_ICI_BLOCKGC_TAG);
1196
1197 spin_unlock(&pag->pag_ici_lock);
1198 xfs_perag_put(pag);
1199 }
1200
1201 void
1202 xfs_inode_set_eofblocks_tag(
1203 xfs_inode_t *ip)
1204 {
1205 trace_xfs_inode_set_eofblocks_tag(ip);
1206 return xfs_blockgc_set_iflag(ip, XFS_IEOFBLOCKS);
1207 }
1208
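/*
 * Clear a speculative preallocation inode flag; if neither blockgc flag
 * remains set, also remove the inode's blockgc tag from the per-AG tree.
 */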
1209 static void
1210 xfs_blockgc_clear_iflag(
1211 struct xfs_inode *ip,
1212 unsigned long iflag)
1213 {
1214 struct xfs_mount *mp = ip->i_mount;
1215 struct xfs_perag *pag;
1216 bool clear_tag;
1217
1218 ASSERT((iflag & ~(XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0);
1219
1220 spin_lock(&ip->i_flags_lock);
1221 ip->i_flags &= ~iflag;
1222 clear_tag = (ip->i_flags & (XFS_IEOFBLOCKS | XFS_ICOWBLOCKS)) == 0;
1223 spin_unlock(&ip->i_flags_lock);
1224
1225 if (!clear_tag)
1226 return;
1227
1228 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1229 spin_lock(&pag->pag_ici_lock);
1230
1231 xfs_perag_clear_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1232 XFS_ICI_BLOCKGC_TAG);
1233
1234 spin_unlock(&pag->pag_ici_lock);
1235 xfs_perag_put(pag);
1236 }
1237
1238 void
1239 xfs_inode_clear_eofblocks_tag(
1240 xfs_inode_t *ip)
1241 {
1242 trace_xfs_inode_clear_eofblocks_tag(ip);
1243 return xfs_blockgc_clear_iflag(ip, XFS_IEOFBLOCKS);
1244 }
1245
1246 /*
1247 * Set ourselves up to free CoW blocks from this file. If it's already clean
1248 * then we can bail out quickly, but otherwise we must back off if the file
1249 * is undergoing some kind of write.
1250 */
1251 static bool
1252 xfs_prep_free_cowblocks(
1253 struct xfs_inode *ip)
1254 {
1255 /*
1256 * Just clear the tag if we have an empty cow fork or none at all. It's
1257 * possible the inode was fully unshared since it was originally tagged.
1258 */
1259 if (!xfs_inode_has_cow_data(ip)) {
1260 trace_xfs_inode_free_cowblocks_invalid(ip);
1261 xfs_inode_clear_cowblocks_tag(ip);
1262 return false;
1263 }
1264
1265 /*
1266 * If the mapping is dirty or under writeback we cannot touch the
1267 * CoW fork. Leave it alone if we're in the midst of a directio.
1268 */
1269 if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
1270 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
1271 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
1272 atomic_read(&VFS_I(ip)->i_dio_count))
1273 return false;
1274
1275 return true;
1276 }
1277
1278 /*
1279 * Automatic CoW Reservation Freeing
1280 *
1281 * These functions automatically garbage collect leftover CoW reservations
1282 * that were made on behalf of a cowextsize hint when we start to run out
1283 * of quota or when the reservations sit around for too long. If the file
1284 * has dirty pages or is undergoing writeback, its CoW reservations will
1285 * be retained.
1286 *
1287 * The actual garbage collection piggybacks off the same code that runs
1288 * the speculative EOF preallocation garbage collector.
1289 */
1290 STATIC int
1291 xfs_inode_free_cowblocks(
1292 struct xfs_inode *ip,
1293 struct xfs_icwalk *icw,
1294 unsigned int *lockflags)
1295 {
1296 bool wait;
1297 int ret = 0;
1298
1299 wait = icw && (icw->icw_flags & XFS_ICWALK_FLAG_SYNC);
1300
1301 if (!xfs_iflags_test(ip, XFS_ICOWBLOCKS))
1302 return 0;
1303
1304 if (!xfs_prep_free_cowblocks(ip))
1305 return 0;
1306
1307 if (!xfs_icwalk_match(ip, icw))
1308 return 0;
1309
1310 /*
1311 * If the caller is waiting, return -EAGAIN to keep the background
1312 * scanner moving and revisit the inode in a subsequent pass.
1313 */
1314 if (!(*lockflags & XFS_IOLOCK_EXCL) &&
1315 !xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
1316 if (wait)
1317 return -EAGAIN;
1318 return 0;
1319 }
1320 *lockflags |= XFS_IOLOCK_EXCL;
1321
1322 if (!xfs_ilock_nowait(ip, XFS_MMAPLOCK_EXCL)) {
1323 if (wait)
1324 return -EAGAIN;
1325 return 0;
1326 }
1327 *lockflags |= XFS_MMAPLOCK_EXCL;
1328
1329 /*
1330 * Check again, nobody else should be able to dirty blocks or change
1331 * the reflink iflag now that we have the first two locks held.
1332 */
1333 if (xfs_prep_free_cowblocks(ip))
1334 ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
1335 return ret;
1336 }
1337
1338 void
1339 xfs_inode_set_cowblocks_tag(
1340 xfs_inode_t *ip)
1341 {
1342 trace_xfs_inode_set_cowblocks_tag(ip);
1343 return xfs_blockgc_set_iflag(ip, XFS_ICOWBLOCKS);
1344 }
1345
1346 void
1347 xfs_inode_clear_cowblocks_tag(
1348 xfs_inode_t *ip)
1349 {
1350 trace_xfs_inode_clear_cowblocks_tag(ip);
1351 return xfs_blockgc_clear_iflag(ip, XFS_ICOWBLOCKS);
1352 }
1353
1354 /* Disable post-EOF and CoW block auto-reclamation. */
1355 void
1356 xfs_blockgc_stop(
1357 struct xfs_mount *mp)
1358 {
1359 struct xfs_perag *pag;
1360 xfs_agnumber_t agno;
1361
1362 if (!xfs_clear_blockgc_enabled(mp))
1363 return;
1364
1365 for_each_perag(mp, agno, pag)
1366 cancel_delayed_work_sync(&pag->pag_blockgc_work);
1367 trace_xfs_blockgc_stop(mp, __return_address);
1368 }
1369
1370 /* Enable post-EOF and CoW block auto-reclamation. */
1371 void
1372 xfs_blockgc_start(
1373 struct xfs_mount *mp)
1374 {
1375 struct xfs_perag *pag;
1376 xfs_agnumber_t agno;
1377
1378 if (xfs_set_blockgc_enabled(mp))
1379 return;
1380
1381 trace_xfs_blockgc_start(mp, __return_address);
1382 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1383 xfs_blockgc_queue(pag);
1384 }
1385
1386 /* Don't try to run block gc on an inode that's in any of these states. */
1387 #define XFS_BLOCKGC_NOGRAB_IFLAGS (XFS_INEW | \
1388 XFS_NEED_INACTIVE | \
1389 XFS_INACTIVATING | \
1390 XFS_IRECLAIMABLE | \
1391 XFS_IRECLAIM)
1392 /*
1393 * Decide if the given @ip is eligible for garbage collection of speculative
1394 * preallocations, and grab it if so. Returns true if it's ready to go or
1395 * false if we should just ignore it.
1396 */
1397 static bool
1398 xfs_blockgc_igrab(
1399 struct xfs_inode *ip)
1400 {
1401 struct inode *inode = VFS_I(ip);
1402
1403 ASSERT(rcu_read_lock_held());
1404
1405 /* Check for stale RCU freed inode */
1406 spin_lock(&ip->i_flags_lock);
1407 if (!ip->i_ino)
1408 goto out_unlock_noent;
1409
1410 if (ip->i_flags & XFS_BLOCKGC_NOGRAB_IFLAGS)
1411 goto out_unlock_noent;
1412 spin_unlock(&ip->i_flags_lock);
1413
1414 /* nothing to sync during shutdown */
1415 if (xfs_is_shutdown(ip->i_mount))
1416 return false;
1417
1418 /* If we can't grab the inode, it must be on its way to reclaim. */
1419 if (!igrab(inode))
1420 return false;
1421
1422 /* inode is valid */
1423 return true;
1424
1425 out_unlock_noent:
1426 spin_unlock(&ip->i_flags_lock);
1427 return false;
1428 }
1429
1430 /* Scan one incore inode for block preallocations that we can remove. */
1431 static int
1432 xfs_blockgc_scan_inode(
1433 struct xfs_inode *ip,
1434 struct xfs_icwalk *icw)
1435 {
1436 unsigned int lockflags = 0;
1437 int error;
1438
1439 error = xfs_inode_free_eofblocks(ip, icw, &lockflags);
1440 if (error)
1441 goto unlock;
1442
1443 error = xfs_inode_free_cowblocks(ip, icw, &lockflags);
1444 unlock:
1445 if (lockflags)
1446 xfs_iunlock(ip, lockflags);
1447 xfs_irele(ip);
1448 return error;
1449 }
1450
1451 /* Background worker that trims preallocated space. */
1452 void
1453 xfs_blockgc_worker(
1454 struct work_struct *work)
1455 {
1456 struct xfs_perag *pag = container_of(to_delayed_work(work),
1457 struct xfs_perag, pag_blockgc_work);
1458 struct xfs_mount *mp = pag->pag_mount;
1459 int error;
1460
1461 trace_xfs_blockgc_worker(mp, __return_address);
1462
1463 error = xfs_icwalk_ag(pag, XFS_ICWALK_BLOCKGC, NULL);
1464 if (error)
1465 xfs_info(mp, "AG %u preallocation gc worker failed, err=%d",
1466 pag->pag_agno, error);
1467 xfs_blockgc_queue(pag);
1468 }
1469
1470 /*
1471 * Try to free space in the filesystem by purging inactive inodes, eofblocks
1472 * and cowblocks.
1473 */
1474 int
1475 xfs_blockgc_free_space(
1476 struct xfs_mount *mp,
1477 struct xfs_icwalk *icw)
1478 {
1479 int error;
1480
1481 trace_xfs_blockgc_free_space(mp, icw, _RET_IP_);
1482
1483 error = xfs_icwalk(mp, XFS_ICWALK_BLOCKGC, icw);
1484 if (error)
1485 return error;
1486
1487 xfs_inodegc_flush(mp);
1488 return 0;
1489 }
1490
1491 /*
1492 * Reclaim all the free space that we can by scheduling the background blockgc
1493 * and inodegc workers immediately and waiting for them all to clear.
1494 */
1495 void
1496 xfs_blockgc_flush_all(
1497 struct xfs_mount *mp)
1498 {
1499 struct xfs_perag *pag;
1500 xfs_agnumber_t agno;
1501
1502 trace_xfs_blockgc_flush_all(mp, __return_address);
1503
1504 /*
1505 * For each blockgc worker, move its queue time up to now. If it
1506 * wasn't queued, it will not be requeued. Then flush whatever's
1507 * left.
1508 */
1509 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1510 mod_delayed_work(pag->pag_mount->m_blockgc_wq,
1511 &pag->pag_blockgc_work, 0);
1512
1513 for_each_perag_tag(mp, agno, pag, XFS_ICI_BLOCKGC_TAG)
1514 flush_delayed_work(&pag->pag_blockgc_work);
1515
1516 xfs_inodegc_flush(mp);
1517 }
1518
1519 /*
1520 * Run cow/eofblocks scans on the supplied dquots. We don't know exactly which
1521 * quota caused an allocation failure, so we make a best effort by including
1522 * each quota under low free space conditions (less than 1% free space) in the
1523 * scan.
1524 *
1525 * Callers must not hold any inode's ILOCK. If requesting a synchronous scan
1526 * (XFS_ICWALK_FLAG_SYNC), the caller also must not hold any inode's IOLOCK or
1527 * MMAPLOCK.
1528 */
1529 int
1530 xfs_blockgc_free_dquots(
1531 struct xfs_mount *mp,
1532 struct xfs_dquot *udqp,
1533 struct xfs_dquot *gdqp,
1534 struct xfs_dquot *pdqp,
1535 unsigned int iwalk_flags)
1536 {
1537 struct xfs_icwalk icw = {0};
1538 bool do_work = false;
1539
1540 if (!udqp && !gdqp && !pdqp)
1541 return 0;
1542
1543 /*
1544 * Run a scan to free blocks using the union filter to cover all
1545 * applicable quotas in a single scan.
1546 */
1547 icw.icw_flags = XFS_ICWALK_FLAG_UNION | iwalk_flags;
1548
1549 if (XFS_IS_UQUOTA_ENFORCED(mp) && udqp && xfs_dquot_lowsp(udqp)) {
1550 icw.icw_uid = make_kuid(mp->m_super->s_user_ns, udqp->q_id);
1551 icw.icw_flags |= XFS_ICWALK_FLAG_UID;
1552 do_work = true;
1553 }
1554
1555 if (XFS_IS_GQUOTA_ENFORCED(mp) && gdqp && xfs_dquot_lowsp(gdqp)) {
1556 icw.icw_gid = make_kgid(mp->m_super->s_user_ns, gdqp->q_id);
1557 icw.icw_flags |= XFS_ICWALK_FLAG_GID;
1558 do_work = true;
1559 }
1560
1561 if (XFS_IS_PQUOTA_ENFORCED(mp) && pdqp && xfs_dquot_lowsp(pdqp)) {
1562 icw.icw_prid = pdqp->q_id;
1563 icw.icw_flags |= XFS_ICWALK_FLAG_PRID;
1564 do_work = true;
1565 }
1566
1567 if (!do_work)
1568 return 0;
1569
1570 return xfs_blockgc_free_space(mp, &icw);
1571 }
1572
1573 /* Run cow/eofblocks scans on the quotas attached to the inode. */
1574 int
1575 xfs_blockgc_free_quota(
1576 struct xfs_inode *ip,
1577 unsigned int iwalk_flags)
1578 {
1579 return xfs_blockgc_free_dquots(ip->i_mount,
1580 xfs_inode_dquot(ip, XFS_DQTYPE_USER),
1581 xfs_inode_dquot(ip, XFS_DQTYPE_GROUP),
1582 xfs_inode_dquot(ip, XFS_DQTYPE_PROJ), iwalk_flags);
1583 }
1584
1585 /* XFS Inode Cache Walking Code */
1586
1587 /*
1588 * The inode lookup is done in batches to keep the amount of lock traffic and
1589 * radix tree lookups to a minimum. The batch size is a trade off between
1590 * lookup reduction and stack usage. This is in the reclaim path, so we can't
1591 * be too greedy.
1592 */
1593 #define XFS_LOOKUP_BATCH 32
1594
1595
1596 /*
1597 * Decide if we want to grab this inode in anticipation of doing work towards
1598 * the goal.
1599 */
1600 static inline bool
1601 xfs_icwalk_igrab(
1602 enum xfs_icwalk_goal goal,
1603 struct xfs_inode *ip,
1604 struct xfs_icwalk *icw)
1605 {
1606 switch (goal) {
1607 case XFS_ICWALK_BLOCKGC:
1608 return xfs_blockgc_igrab(ip);
1609 case XFS_ICWALK_RECLAIM:
1610 return xfs_reclaim_igrab(ip, icw);
1611 default:
1612 return false;
1613 }
1614 }
1615
1616 /*
1617 * Process an inode. Each processing function must handle any state changes
1618 * made by the icwalk igrab function. Return -EAGAIN to skip an inode.
1619 */
1620 static inline int
1621 xfs_icwalk_process_inode(
1622 enum xfs_icwalk_goal goal,
1623 struct xfs_inode *ip,
1624 struct xfs_perag *pag,
1625 struct xfs_icwalk *icw)
1626 {
1627 int error = 0;
1628
1629 switch (goal) {
1630 case XFS_ICWALK_BLOCKGC:
1631 error = xfs_blockgc_scan_inode(ip, icw);
1632 break;
1633 case XFS_ICWALK_RECLAIM:
1634 xfs_reclaim_inode(ip, pag);
1635 break;
1636 }
1637 return error;
1638 }
1639
1640 /*
1641 * For a given per-AG structure @pag and a goal, grab qualifying inodes and
1642 * process them in some manner.
1643 */
1644 static int
1645 xfs_icwalk_ag(
1646 struct xfs_perag *pag,
1647 enum xfs_icwalk_goal goal,
1648 struct xfs_icwalk *icw)
1649 {
1650 struct xfs_mount *mp = pag->pag_mount;
1651 uint32_t first_index;
1652 int last_error = 0;
1653 int skipped;
1654 bool done;
1655 int nr_found;
1656
1657 restart:
1658 done = false;
1659 skipped = 0;
1660 if (goal == XFS_ICWALK_RECLAIM)
1661 first_index = READ_ONCE(pag->pag_ici_reclaim_cursor);
1662 else
1663 first_index = 0;
1664 nr_found = 0;
1665 do {
1666 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1667 int error = 0;
1668 int i;
1669
1670 rcu_read_lock();
1671
1672 nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
1673 (void **) batch, first_index,
1674 XFS_LOOKUP_BATCH, goal);
1675 if (!nr_found) {
1676 done = true;
1677 rcu_read_unlock();
1678 break;
1679 }
1680
1681 /*
1682 * Grab the inodes before we drop the lock. If we found
1683 * nothing, nr_found == 0 and the loop will be skipped.
1684 */
1685 for (i = 0; i < nr_found; i++) {
1686 struct xfs_inode *ip = batch[i];
1687
1688 if (done || !xfs_icwalk_igrab(goal, ip, icw))
1689 batch[i] = NULL;
1690
1691 /*
1692 * Update the index for the next lookup. Catch
1693 * overflows into the next AG range which can occur if
1694 * we have inodes in the last block of the AG and we
1695 * are currently pointing to the last inode.
1696 *
1697 * Because we may see inodes that are from the wrong AG
1698 * due to RCU freeing and reallocation, only update the
1699 * index if it lies in this AG. It was a race that led
1700 * us to see this inode, so another lookup from the
1701 * same index will not find it again.
1702 */
1703 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
1704 continue;
1705 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1706 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1707 done = true;
1708 }
1709
1710 /* unlock now we've grabbed the inodes. */
1711 rcu_read_unlock();
1712
1713 for (i = 0; i < nr_found; i++) {
1714 if (!batch[i])
1715 continue;
1716 error = xfs_icwalk_process_inode(goal, batch[i], pag,
1717 icw);
1718 if (error == -EAGAIN) {
1719 skipped++;
1720 continue;
1721 }
1722 if (error && last_error != -EFSCORRUPTED)
1723 last_error = error;
1724 }
1725
1726 /* bail out if the filesystem is corrupted. */
1727 if (error == -EFSCORRUPTED)
1728 break;
1729
1730 cond_resched();
1731
1732 if (icw && (icw->icw_flags & XFS_ICWALK_FLAG_SCAN_LIMIT)) {
1733 icw->icw_scan_limit -= XFS_LOOKUP_BATCH;
1734 if (icw->icw_scan_limit <= 0)
1735 break;
1736 }
1737 } while (nr_found && !done);
1738
1739 if (goal == XFS_ICWALK_RECLAIM) {
1740 if (done)
1741 first_index = 0;
1742 WRITE_ONCE(pag->pag_ici_reclaim_cursor, first_index);
1743 }
1744
1745 if (skipped) {
1746 delay(1);
1747 goto restart;
1748 }
1749 return last_error;
1750 }
1751
1752 /* Walk all incore inodes to achieve a given goal. */
1753 static int
1754 xfs_icwalk(
1755 struct xfs_mount *mp,
1756 enum xfs_icwalk_goal goal,
1757 struct xfs_icwalk *icw)
1758 {
1759 struct xfs_perag *pag;
1760 int error = 0;
1761 int last_error = 0;
1762 xfs_agnumber_t agno;
1763
1764 for_each_perag_tag(mp, agno, pag, goal) {
1765 error = xfs_icwalk_ag(pag, goal, icw);
1766 if (error) {
1767 last_error = error;
1768 if (error == -EFSCORRUPTED) {
1769 xfs_perag_put(pag);
1770 break;
1771 }
1772 }
1773 }
1774 return last_error;
1775 BUILD_BUG_ON(XFS_ICWALK_PRIVATE_FLAGS & XFS_ICWALK_FLAGS_VALID);
1776 }
1777
1778 #ifdef DEBUG
1779 static void
1780 xfs_check_delalloc(
1781 struct xfs_inode *ip,
1782 int whichfork)
1783 {
1784 struct xfs_ifork *ifp = xfs_ifork_ptr(ip, whichfork);
1785 struct xfs_bmbt_irec got;
1786 struct xfs_iext_cursor icur;
1787
1788 if (!ifp || !xfs_iext_lookup_extent(ip, ifp, 0, &icur, &got))
1789 return;
1790 do {
1791 if (isnullstartblock(got.br_startblock)) {
1792 xfs_warn(ip->i_mount,
1793 "ino %llx %s fork has delalloc extent at [0x%llx:0x%llx]",
1794 ip->i_ino,
1795 whichfork == XFS_DATA_FORK ? "data" : "cow",
1796 got.br_startoff, got.br_blockcount);
1797 }
1798 } while (xfs_iext_next_extent(ifp, &icur, &got));
1799 }
1800 #else
1801 #define xfs_check_delalloc(ip, whichfork) do { } while (0)
1802 #endif
1803
1804 /* Schedule the inode for reclaim. */
1805 static void
1806 xfs_inodegc_set_reclaimable(
1807 struct xfs_inode *ip)
1808 {
1809 struct xfs_mount *mp = ip->i_mount;
1810 struct xfs_perag *pag;
1811
1812 if (!xfs_is_shutdown(mp) && ip->i_delayed_blks) {
1813 xfs_check_delalloc(ip, XFS_DATA_FORK);
1814 xfs_check_delalloc(ip, XFS_COW_FORK);
1815 ASSERT(0);
1816 }
1817
1818 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1819 spin_lock(&pag->pag_ici_lock);
1820 spin_lock(&ip->i_flags_lock);
1821
1822 trace_xfs_inode_set_reclaimable(ip);
1823 ip->i_flags &= ~(XFS_NEED_INACTIVE | XFS_INACTIVATING);
1824 ip->i_flags |= XFS_IRECLAIMABLE;
1825 xfs_perag_set_inode_tag(pag, XFS_INO_TO_AGINO(mp, ip->i_ino),
1826 XFS_ICI_RECLAIM_TAG);
1827
1828 spin_unlock(&ip->i_flags_lock);
1829 spin_unlock(&pag->pag_ici_lock);
1830 xfs_perag_put(pag);
1831 }
1832
1833 /*
1834 * Free all speculative preallocations and possibly even the inode itself.
1835 * This is the last chance to make changes to an otherwise unreferenced file
1836 * before incore reclamation happens.
1837 */
1838 static void
1839 xfs_inodegc_inactivate(
1840 struct xfs_inode *ip)
1841 {
1842 trace_xfs_inode_inactivating(ip);
1843 xfs_inactive(ip);
1844 xfs_inodegc_set_reclaimable(ip);
1845 }
1846
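/*
 * Per-CPU background worker that drains this CPU's inodegc llist and
 * inactivates each queued inode.
 */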
1847 void
1848 xfs_inodegc_worker(
1849 struct work_struct *work)
1850 {
1851 struct xfs_inodegc *gc = container_of(to_delayed_work(work),
1852 struct xfs_inodegc, work);
1853 struct llist_node *node = llist_del_all(&gc->list);
1854 struct xfs_inode *ip, *n;
1855
1856 ASSERT(gc->cpu == smp_processor_id());
1857
1858 WRITE_ONCE(gc->items, 0);
1859
1860 if (!node)
1861 return;
1862
1863 ip = llist_entry(node, struct xfs_inode, i_gclist);
1864 trace_xfs_inodegc_worker(ip->i_mount, READ_ONCE(gc->shrinker_hits));
1865
1866 WRITE_ONCE(gc->shrinker_hits, 0);
1867 llist_for_each_entry_safe(ip, n, node, i_gclist) {
1868 xfs_iflags_set(ip, XFS_INACTIVATING);
1869 xfs_inodegc_inactivate(ip);
1870 }
1871 }
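/*
 * For reference, the per-cpu queue drained above bundles the fields used in
 * this file; the authoritative definition is struct xfs_inodegc in
 * xfs_mount.h, and this sketch may omit members or debug-only fields:
 *
 *	struct xfs_inodegc {
 *		struct llist_head	list;		(lockless list of inodes)
 *		struct delayed_work	work;		(runs xfs_inodegc_worker)
 *		unsigned int		items;		(approximate queue depth)
 *		unsigned int		shrinker_hits;	(memory pressure feedback)
 *		unsigned int		cpu;		(owning CPU, checked above)
 *	};
 */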
1872
1873 /*
1874 * Expedite all pending inodegc work to run immediately. This does not wait for
1875 * completion of the work.
1876 */
1877 void
1878 xfs_inodegc_push(
1879 struct xfs_mount *mp)
1880 {
1881 if (!xfs_is_inodegc_enabled(mp))
1882 return;
1883 trace_xfs_inodegc_push(mp, __return_address);
1884 xfs_inodegc_queue_all(mp);
1885 }
1886
1887 /*
1888 * Force all currently queued inode inactivation work to run immediately and
1889 * wait for the work to finish.
1890 */
1891 void
1892 xfs_inodegc_flush(
1893 struct xfs_mount *mp)
1894 {
1895 xfs_inodegc_push(mp);
1896 trace_xfs_inodegc_flush(mp, __return_address);
1897 flush_workqueue(mp->m_inodegc_wq);
1898 }
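/*
 * Illustrative (hypothetical) caller: code that must observe the side effects
 * of every queued inactivation, e.g. before trusting free space or quota
 * counters, would do something like:
 *
 *	xfs_inodegc_flush(mp);
 *	... read the counters, knowing all queued inodes have been inactivated ...
 */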
1899
1900 /*
1901 * Flush all the pending work and then disable the inode inactivation background
1902 * workers and wait for them to stop. Caller must hold sb->s_umount to
1903 * coordinate changes in the inodegc_enabled state.
1904 */
1905 void
1906 xfs_inodegc_stop(
1907 struct xfs_mount *mp)
1908 {
1909 bool rerun;
1910
1911 if (!xfs_clear_inodegc_enabled(mp))
1912 return;
1913
1914 /*
1915 * Drain all pending inodegc work, including inodes that could be
1916 * queued by racing xfs_inodegc_queue or xfs_inodegc_shrinker_scan
1917 * threads that sample the inodegc state just prior to us clearing it.
1918 * The inodegc flag state prevents new threads from queuing more
1919 * inodes, so we queue pending work items and flush the workqueue until
1920 * all inodegc lists are empty. IOWs, we cannot use drain_workqueue
1921 * here because it does not allow other unserialized mechanisms to
1922 * reschedule inodegc work while this draining is in progress.
1923 */
1924 xfs_inodegc_queue_all(mp);
1925 do {
1926 flush_workqueue(mp->m_inodegc_wq);
1927 rerun = xfs_inodegc_queue_all(mp);
1928 } while (rerun);
1929
1930 trace_xfs_inodegc_stop(mp, __return_address);
1931 }
1932
1933 /*
1934 * Enable the inode inactivation background workers and schedule deferred inode
1935 * inactivation work if there is any. Caller must hold sb->s_umount to
1936 * coordinate changes in the inodegc_enabled state.
1937 */
1938 void
1939 xfs_inodegc_start(
1940 struct xfs_mount *mp)
1941 {
1942 if (xfs_set_inodegc_enabled(mp))
1943 return;
1944
1945 trace_xfs_inodegc_start(mp, __return_address);
1946 xfs_inodegc_queue_all(mp);
1947 }
1948
1949 #ifdef CONFIG_XFS_RT
1950 static inline bool
1951 xfs_inodegc_want_queue_rt_file(
1952 struct xfs_inode *ip)
1953 {
1954 struct xfs_mount *mp = ip->i_mount;
1955
1956 if (!XFS_IS_REALTIME_INODE(ip))
1957 return false;
1958
1959 if (__percpu_counter_compare(&mp->m_frextents,
1960 mp->m_low_rtexts[XFS_LOWSP_5_PCNT],
1961 XFS_FDBLOCKS_BATCH) < 0)
1962 return true;
1963
1964 return false;
1965 }
1966 #else
1967 # define xfs_inodegc_want_queue_rt_file(ip) (false)
1968 #endif /* CONFIG_XFS_RT */
1969
1970 /*
1971 * Schedule the inactivation worker when:
1972 *
1973 * - We've accumulated more than one inode cluster buffer's worth of inodes.
1974 * - There is less than 5% free space left.
1975 * - Any of the quotas for this inode are near an enforcement limit.
1976 */
1977 static inline bool
1978 xfs_inodegc_want_queue_work(
1979 struct xfs_inode *ip,
1980 unsigned int items)
1981 {
1982 struct xfs_mount *mp = ip->i_mount;
1983
1984 if (items > mp->m_ino_geo.inodes_per_cluster)
1985 return true;
1986
1987 if (__percpu_counter_compare(&mp->m_fdblocks,
1988 mp->m_low_space[XFS_LOWSP_5_PCNT],
1989 XFS_FDBLOCKS_BATCH) < 0)
1990 return true;
1991
1992 if (xfs_inodegc_want_queue_rt_file(ip))
1993 return true;
1994
1995 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_USER))
1996 return true;
1997
1998 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_GROUP))
1999 return true;
2000
2001 if (xfs_inode_near_dquot_enforcement(ip, XFS_DQTYPE_PROJ))
2002 return true;
2003
2004 return false;
2005 }
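/*
 * Worked example (one common geometry, not a guarantee): with 512-byte inodes
 * and 16k inode cluster buffers, inodes_per_cluster is 32.  Once more than 32
 * inodes are already sitting on a CPU's queue, the next call to
 * xfs_inodegc_queue() drops queue_delay from the default 1 jiffy to 0 so the
 * worker runs without the batching delay.
 */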
2006
2007 /*
2008 * Upper bound on the number of inodes that each per-CPU queue can have
2009 * pending inactivation at any given time, to avoid monopolizing the workqueue.
2010 */
2011 #define XFS_INODEGC_MAX_BACKLOG (4 * XFS_INODES_PER_CHUNK)
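/*
 * XFS_INODES_PER_CHUNK is 64 (one inode btree record's worth of inodes), so
 * this works out to 256 inodes per queue before xfs_inodegc_want_flush_work()
 * starts throttling the frontend.
 */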
2012
2013 /*
2014 * Make the frontend wait for inactivations when:
2015 *
2016 * - Memory shrinkers queued the inactivation worker and it hasn't finished.
2017 * - The queue depth exceeds the maximum allowable percpu backlog.
2018 *
2019 * Note: If the current thread is running a transaction, we don't ever want to
2020 * wait for other transactions because that could introduce a deadlock.
2021 */
2022 static inline bool
2023 xfs_inodegc_want_flush_work(
2024 struct xfs_inode *ip,
2025 unsigned int items,
2026 unsigned int shrinker_hits)
2027 {
2028 if (current->journal_info)
2029 return false;
2030
2031 if (shrinker_hits > 0)
2032 return true;
2033
2034 if (items > XFS_INODEGC_MAX_BACKLOG)
2035 return true;
2036
2037 return false;
2038 }
2039
2040 /*
2041 * Queue a background inactivation worker if there are inodes that need to be
2042 * inactivated and higher level xfs code hasn't disabled the background
2043 * workers.
2044 */
2045 static void
2046 xfs_inodegc_queue(
2047 struct xfs_inode *ip)
2048 {
2049 struct xfs_mount *mp = ip->i_mount;
2050 struct xfs_inodegc *gc;
2051 int items;
2052 unsigned int shrinker_hits;
2053 unsigned long queue_delay = 1;
2054
2055 trace_xfs_inode_set_need_inactive(ip);
2056 spin_lock(&ip->i_flags_lock);
2057 ip->i_flags |= XFS_NEED_INACTIVE;
2058 spin_unlock(&ip->i_flags_lock);
2059
2060 gc = get_cpu_ptr(mp->m_inodegc);
2061 llist_add(&ip->i_gclist, &gc->list);
2062 items = READ_ONCE(gc->items);
2063 WRITE_ONCE(gc->items, items + 1);
2064 shrinker_hits = READ_ONCE(gc->shrinker_hits);
2065
2066 /*
2067 * We queue the work while holding the current CPU so that the work
2068 * is scheduled to run on this CPU.
2069 */
2070 if (!xfs_is_inodegc_enabled(mp)) {
2071 put_cpu_ptr(gc);
2072 return;
2073 }
2074
2075 if (xfs_inodegc_want_queue_work(ip, items))
2076 queue_delay = 0;
2077
2078 trace_xfs_inodegc_queue(mp, __return_address);
2079 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2080 queue_delay);
2081 put_cpu_ptr(gc);
2082
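/*
 * Preemption has been re-enabled by put_cpu_ptr() above, which is what makes
 * the throttling below safe: flush_delayed_work() can sleep while the worker
 * drains the queue.
 */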
2083 if (xfs_inodegc_want_flush_work(ip, items, shrinker_hits)) {
2084 trace_xfs_inodegc_throttle(mp, __return_address);
2085 flush_delayed_work(&gc->work);
2086 }
2087 }
2088
2089 /*
2090 * Fold the dead CPU's inodegc queue into the current CPU's queue.
2091 */
2092 void
2093 xfs_inodegc_cpu_dead(
2094 struct xfs_mount *mp,
2095 unsigned int dead_cpu)
2096 {
2097 struct xfs_inodegc *dead_gc, *gc;
2098 struct llist_node *first, *last;
2099 unsigned int count = 0;
2100
2101 dead_gc = per_cpu_ptr(mp->m_inodegc, dead_cpu);
2102 cancel_delayed_work_sync(&dead_gc->work);
2103
2104 if (llist_empty(&dead_gc->list))
2105 return;
2106
2107 first = dead_gc->list.first;
2108 last = first;
2109 while (last->next) {
2110 last = last->next;
2111 count++;
2112 }
2113 dead_gc->list.first = NULL;
2114 dead_gc->items = 0;
2115
2116 /* Add pending work to current CPU */
2117 gc = get_cpu_ptr(mp->m_inodegc);
2118 llist_add_batch(first, last, &gc->list);
2119 count += READ_ONCE(gc->items);
2120 WRITE_ONCE(gc->items, count);
2121
2122 if (xfs_is_inodegc_enabled(mp)) {
2123 trace_xfs_inodegc_queue(mp, __return_address);
2124 mod_delayed_work_on(current_cpu(), mp->m_inodegc_wq, &gc->work,
2125 0);
2126 }
2127 put_cpu_ptr(gc);
2128 }
2129
2130 /*
2131 * We set the inode flag atomically with the radix tree tag. Once we get tag
2132 * lookups on the radix tree, this inode flag can go away.
2133 *
2134 * We always use background reclaim here because even if the inode is clean, it
2135 * still may be under IO and hence we have to wait for IO completion to occur
2136 * before we can reclaim the inode. The background reclaim path handles this
2137 * more efficiently than we can here, so simply let background reclaim tear down
2138 * all inodes.
2139 */
2140 void
2141 xfs_inode_mark_reclaimable(
2142 struct xfs_inode *ip)
2143 {
2144 struct xfs_mount *mp = ip->i_mount;
2145 bool need_inactive;
2146
2147 XFS_STATS_INC(mp, vn_reclaim);
2148
2149 /*
2150 * We should never get here with any of the reclaim flags already set.
2151 */
2152 ASSERT_ALWAYS(!xfs_iflags_test(ip, XFS_ALL_IRECLAIM_FLAGS));
2153
2154 need_inactive = xfs_inode_needs_inactive(ip);
2155 if (need_inactive) {
2156 xfs_inodegc_queue(ip);
2157 return;
2158 }
2159
2160 /* Going straight to reclaim, so drop the dquots. */
2161 xfs_qm_dqdetach(ip);
2162 xfs_inodegc_set_reclaimable(ip);
2163 }
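/*
 * To summarize, the VFS ->destroy_inode path above takes one of two routes:
 *
 *	needs inactivation:	xfs_inodegc_queue() -> worker -> xfs_inactive()
 *				-> xfs_inodegc_set_reclaimable()
 *	already clean:		xfs_qm_dqdetach() -> xfs_inodegc_set_reclaimable()
 *
 * Either way the inode ends up tagged XFS_ICI_RECLAIM_TAG for background
 * reclaim to tear down.
 */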
2164
2165 /*
2166 * Register a phony shrinker so that we can run background inodegc sooner when
2167 * there's memory pressure. Inactivation does not itself free any memory but
2168 * it does make inodes reclaimable, which eventually frees memory.
2169 *
2170 * The count function, seek value, and batch value are crafted to trigger the
2171 * scan function during the second round of scanning. Hopefully this means
2172 * that we reclaimed enough memory that initiating metadata transactions won't
2173 * make things worse.
2174 */
2175 #define XFS_INODEGC_SHRINKER_COUNT (1UL << DEF_PRIORITY)
2176 #define XFS_INODEGC_SHRINKER_BATCH ((XFS_INODEGC_SHRINKER_COUNT / 2) + 1)
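/*
 * With DEF_PRIORITY == 12 this yields a count of 4096 and a batch of 2049.
 * A shrinker with zero ->seeks is asked to scan roughly half of its reported
 * count per reclaim pass, so the first pass defers about 2048 objects (below
 * the batch size, hence no ->scan_objects call) and the second pass pushes
 * the running total past the batch and finally calls the scan function.  The
 * exact shrinker core arithmetic varies between kernel versions; treat this
 * as the intent rather than a contract.
 */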
2177
2178 static unsigned long
2179 xfs_inodegc_shrinker_count(
2180 struct shrinker *shrink,
2181 struct shrink_control *sc)
2182 {
2183 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2184 m_inodegc_shrinker);
2185 struct xfs_inodegc *gc;
2186 int cpu;
2187
2188 if (!xfs_is_inodegc_enabled(mp))
2189 return 0;
2190
2191 for_each_online_cpu(cpu) {
2192 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2193 if (!llist_empty(&gc->list))
2194 return XFS_INODEGC_SHRINKER_COUNT;
2195 }
2196
2197 return 0;
2198 }
2199
2200 static unsigned long
2201 xfs_inodegc_shrinker_scan(
2202 struct shrinker *shrink,
2203 struct shrink_control *sc)
2204 {
2205 struct xfs_mount *mp = container_of(shrink, struct xfs_mount,
2206 m_inodegc_shrinker);
2207 struct xfs_inodegc *gc;
2208 int cpu;
2209 bool no_items = true;
2210
2211 if (!xfs_is_inodegc_enabled(mp))
2212 return SHRINK_STOP;
2213
2214 trace_xfs_inodegc_shrinker_scan(mp, sc, __return_address);
2215
2216 for_each_online_cpu(cpu) {
2217 gc = per_cpu_ptr(mp->m_inodegc, cpu);
2218 if (!llist_empty(&gc->list)) {
2219 unsigned int h = READ_ONCE(gc->shrinker_hits);
2220
2221 WRITE_ONCE(gc->shrinker_hits, h + 1);
2222 mod_delayed_work_on(cpu, mp->m_inodegc_wq, &gc->work, 0);
2223 no_items = false;
2224 }
2225 }
2226
2227 /*
2228 * If there are no inodes to inactivate, we don't want the shrinker
2229 * to think there's deferred work to call us back about.
2230 */
2231 if (no_items)
2232 return LONG_MAX;
2233
2234 return SHRINK_STOP;
2235 }
2236
2237 /* Register a shrinker so we can accelerate inodegc and throttle queuing. */
2238 int
2239 xfs_inodegc_register_shrinker(
2240 struct xfs_mount *mp)
2241 {
2242 struct shrinker *shrink = &mp->m_inodegc_shrinker;
2243
2244 shrink->count_objects = xfs_inodegc_shrinker_count;
2245 shrink->scan_objects = xfs_inodegc_shrinker_scan;
2246 shrink->seeks = 0;
2247 shrink->flags = SHRINKER_NONSLAB;
2248 shrink->batch = XFS_INODEGC_SHRINKER_BATCH;
2249
2250 return register_shrinker(shrink, "xfs-inodegc:%s", mp->m_super->s_id);
2251 }
2252