1 /*
2 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18 #include "xfs.h"
19 #include "xfs_fs.h"
20 #include "xfs_format.h"
21 #include "xfs_log_format.h"
22 #include "xfs_trans_resv.h"
23 #include "xfs_sb.h"
24 #include "xfs_mount.h"
25 #include "xfs_inode.h"
26 #include "xfs_error.h"
27 #include "xfs_trans.h"
28 #include "xfs_trans_priv.h"
29 #include "xfs_inode_item.h"
30 #include "xfs_quota.h"
31 #include "xfs_trace.h"
32 #include "xfs_icache.h"
33 #include "xfs_bmap_util.h"
34 #include "xfs_dquot_item.h"
35 #include "xfs_dquot.h"
36
37 #include <linux/kthread.h>
38 #include <linux/freezer.h>
39
40 STATIC void __xfs_inode_clear_reclaim_tag(struct xfs_mount *mp,
41 struct xfs_perag *pag, struct xfs_inode *ip);
42
43 /*
44 * Allocate and initialise an xfs_inode.
45 */
46 struct xfs_inode *
47 xfs_inode_alloc(
48 struct xfs_mount *mp,
49 xfs_ino_t ino)
50 {
51 struct xfs_inode *ip;
52
53 /*
54 * If this didn't occur in transaction context, we could use
55 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
56 * code up to do this anyway.
57 */
58 ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
59 if (!ip)
60 return NULL;
61 if (inode_init_always(mp->m_super, VFS_I(ip))) {
62 kmem_zone_free(xfs_inode_zone, ip);
63 return NULL;
64 }
65
66 XFS_STATS_INC(mp, vn_active);
67 ASSERT(atomic_read(&ip->i_pincount) == 0);
68 ASSERT(!spin_is_locked(&ip->i_flags_lock));
69 ASSERT(!xfs_isiflocked(ip));
70 ASSERT(ip->i_ino == 0);
71
72 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
73
74 /* initialise the xfs inode */
75 ip->i_ino = ino;
76 ip->i_mount = mp;
77 memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
78 ip->i_afp = NULL;
79 memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
80 ip->i_flags = 0;
81 ip->i_delayed_blks = 0;
82 memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
83
84 return ip;
85 }
86
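/*
 * RCU callback that performs the final free of an xfs_inode once the grace
 * period has expired and no lookup can still be referencing it.
 */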
87 STATIC void
88 xfs_inode_free_callback(
89 struct rcu_head *head)
90 {
91 struct inode *inode = container_of(head, struct inode, i_rcu);
92 struct xfs_inode *ip = XFS_I(inode);
93
94 kmem_zone_free(xfs_inode_zone, ip);
95 }
96
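/*
 * Free an xfs_inode: tear down the data and attr forks and the inode log
 * item, mark the inode as reclaimed with a zeroed inode number, and hand
 * the final free off to RCU.
 */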
97 void
98 xfs_inode_free(
99 struct xfs_inode *ip)
100 {
101 switch (ip->i_d.di_mode & S_IFMT) {
102 case S_IFREG:
103 case S_IFDIR:
104 case S_IFLNK:
105 xfs_idestroy_fork(ip, XFS_DATA_FORK);
106 break;
107 }
108
109 if (ip->i_afp)
110 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
111
112 if (ip->i_itemp) {
113 ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
114 xfs_inode_item_destroy(ip);
115 ip->i_itemp = NULL;
116 }
117
118 /*
119 * Because we use RCU freeing we need to ensure the inode always
120 * appears to be reclaimed with an invalid inode number when in the
121 * free state. The ip->i_flags_lock provides the barrier against lookup
122 * races.
123 */
124 spin_lock(&ip->i_flags_lock);
125 ip->i_flags = XFS_IRECLAIM;
126 ip->i_ino = 0;
127 spin_unlock(&ip->i_flags_lock);
128
129 /* asserts to verify all state is correct here */
130 ASSERT(atomic_read(&ip->i_pincount) == 0);
131 ASSERT(!xfs_isiflocked(ip));
132 XFS_STATS_DEC(ip->i_mount, vn_active);
133
134 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
135 }
136
137 /*
138 * If we are allocating a new inode, then check that what was returned is
139 * actually a free, empty inode. If we are not allocating an inode,
140 * then check that we didn't find a free inode.
141 *
142 * Returns:
143 * 0 if the inode free state matches the lookup context
144 * -ENOENT if the inode is free and we are not allocating
145 * -EFSCORRUPTED if there is any state mismatch at all
146 */
147 static int
148 xfs_iget_check_free_state(
149 struct xfs_inode *ip,
150 int flags)
151 {
152 if (flags & XFS_IGET_CREATE) {
153 /* should be a free inode */
154 if (ip->i_d.di_mode != 0) {
155 xfs_warn(ip->i_mount,
156 "Corruption detected! Free inode 0x%llx not marked free! (mode 0x%x)",
157 ip->i_ino, ip->i_d.di_mode);
158 return -EFSCORRUPTED;
159 }
160
161 if (ip->i_d.di_nblocks != 0) {
162 xfs_warn(ip->i_mount,
163 "Corruption detected! Free inode 0x%llx has blocks allocated!",
164 ip->i_ino);
165 return -EFSCORRUPTED;
166 }
167 return 0;
168 }
169
170 /* should be an allocated inode */
171 if (ip->i_d.di_mode == 0)
172 return -ENOENT;
173
174 return 0;
175 }
176
177 /*
178 * Check the validity of the inode we just found in the cache
179 */
180 static int
181 xfs_iget_cache_hit(
182 struct xfs_perag *pag,
183 struct xfs_inode *ip,
184 xfs_ino_t ino,
185 int flags,
186 int lock_flags) __releases(RCU)
187 {
188 struct inode *inode = VFS_I(ip);
189 struct xfs_mount *mp = ip->i_mount;
190 int error;
191
192 /*
193 * check for re-use of an inode within an RCU grace period due to the
194 * radix tree nodes not being updated yet. We monitor for this by
195 * setting the inode number to zero before freeing the inode structure.
196 * If the inode has been reallocated and set up, then the inode number
197 * will not match, so check for that, too.
198 */
199 spin_lock(&ip->i_flags_lock);
200 if (ip->i_ino != ino) {
201 trace_xfs_iget_skip(ip);
202 XFS_STATS_INC(mp, xs_ig_frecycle);
203 error = -EAGAIN;
204 goto out_error;
205 }
206
207
208 /*
209 * If we are racing with another cache hit that is currently
210 * instantiating this inode or currently recycling it out of
211 * reclaimable state, wait for the initialisation to complete
212 * before continuing.
213 *
214 * XXX(hch): eventually we should do something equivalent to
215 * wait_on_inode to wait for these flags to be cleared
216 * instead of polling for it.
217 */
218 if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
219 trace_xfs_iget_skip(ip);
220 XFS_STATS_INC(mp, xs_ig_frecycle);
221 error = -EAGAIN;
222 goto out_error;
223 }
224
225 /*
226 * Check the inode free state is valid. This also detects lookup
227 * racing with unlinks.
228 */
229 error = xfs_iget_check_free_state(ip, flags);
230 if (error)
231 goto out_error;
232
233 /*
234 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
235 * Need to carefully get it back into useable state.
236 */
237 if (ip->i_flags & XFS_IRECLAIMABLE) {
238 trace_xfs_iget_reclaim(ip);
239
240 /*
241 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
242 * from stomping over us while we recycle the inode. We can't
243 * clear the radix tree reclaimable tag yet as it requires
244 * pag_ici_lock to be held exclusive.
245 */
246 ip->i_flags |= XFS_IRECLAIM;
247
248 spin_unlock(&ip->i_flags_lock);
249 rcu_read_unlock();
250
251 error = inode_init_always(mp->m_super, inode);
252 if (error) {
253 bool wake;
254 /*
255 * Re-initializing the inode failed, and we are in deep
256 * trouble. Try to re-add it to the reclaim list.
257 */
258 rcu_read_lock();
259 spin_lock(&ip->i_flags_lock);
260 wake = !!__xfs_iflags_test(ip, XFS_INEW);
261 ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
262 if (wake)
263 wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
264 ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
265 trace_xfs_iget_reclaim_fail(ip);
266 goto out_error;
267 }
268
269 spin_lock(&pag->pag_ici_lock);
270 spin_lock(&ip->i_flags_lock);
271
272 /*
273 * Clear the per-lifetime state in the inode as we are now
274 * effectively a new inode and need to return to the initial
275 * state before reuse occurs.
276 */
277 ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
278 ip->i_flags |= XFS_INEW;
279 __xfs_inode_clear_reclaim_tag(mp, pag, ip);
280 inode->i_state = I_NEW;
281
282 ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
283 mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
284
285 spin_unlock(&ip->i_flags_lock);
286 spin_unlock(&pag->pag_ici_lock);
287 } else {
288 /* If the VFS inode is being torn down, pause and try again. */
289 if (!igrab(inode)) {
290 trace_xfs_iget_skip(ip);
291 error = -EAGAIN;
292 goto out_error;
293 }
294
295 /* We've got a live one. */
296 spin_unlock(&ip->i_flags_lock);
297 rcu_read_unlock();
298 trace_xfs_iget_hit(ip);
299 }
300
301 if (lock_flags != 0)
302 xfs_ilock(ip, lock_flags);
303
304 xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
305 XFS_STATS_INC(mp, xs_ig_found);
306
307 return 0;
308
309 out_error:
310 spin_unlock(&ip->i_flags_lock);
311 rcu_read_unlock();
312 return error;
313 }
314
315
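/*
 * Inode cache miss: allocate a new in-core inode, read it in from disk and
 * insert it into the per-AG radix tree with XFS_INEW set so that concurrent
 * lookups see it as still under construction.
 */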
316 static int
317 xfs_iget_cache_miss(
318 struct xfs_mount *mp,
319 struct xfs_perag *pag,
320 xfs_trans_t *tp,
321 xfs_ino_t ino,
322 struct xfs_inode **ipp,
323 int flags,
324 int lock_flags)
325 {
326 struct xfs_inode *ip;
327 int error;
328 xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino);
329 int iflags;
330
331 ip = xfs_inode_alloc(mp, ino);
332 if (!ip)
333 return -ENOMEM;
334
335 error = xfs_iread(mp, tp, ip, flags);
336 if (error)
337 goto out_destroy;
338
339 trace_xfs_iget_miss(ip);
340
341
342 /*
343 * Check the inode free state is valid. This also detects lookup
344 * racing with unlinks.
345 */
346 error = xfs_iget_check_free_state(ip, flags);
347 if (error)
348 goto out_destroy;
349
350 /*
351 * Preload the radix tree so we can insert safely under the
352 * write spinlock. Note that we cannot sleep inside the preload
353 * region. Since we can be called from transaction context, don't
354 * recurse into the file system.
355 */
356 if (radix_tree_preload(GFP_NOFS)) {
357 error = -EAGAIN;
358 goto out_destroy;
359 }
360
361 /*
362 * Because the inode hasn't been added to the radix-tree yet it can't
363 * be found by another thread, so we can do the non-sleeping lock here.
364 */
365 if (lock_flags) {
366 if (!xfs_ilock_nowait(ip, lock_flags))
367 BUG();
368 }
369
370 /*
371 * These values must be set before inserting the inode into the radix
372 * tree as the moment it is inserted a concurrent lookup (allowed by the
373 * RCU locking mechanism) can find it and that lookup must see that this
374 * is an inode currently under construction (i.e. that XFS_INEW is set).
375 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
376 * memory barrier that ensures this detection works correctly at lookup
377 * time.
378 */
379 iflags = XFS_INEW;
380 if (flags & XFS_IGET_DONTCACHE)
381 iflags |= XFS_IDONTCACHE;
382 ip->i_udquot = NULL;
383 ip->i_gdquot = NULL;
384 ip->i_pdquot = NULL;
385 xfs_iflags_set(ip, iflags);
386
387 /* insert the new inode */
388 spin_lock(&pag->pag_ici_lock);
389 error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
390 if (unlikely(error)) {
391 WARN_ON(error != -EEXIST);
392 XFS_STATS_INC(mp, xs_ig_dup);
393 error = -EAGAIN;
394 goto out_preload_end;
395 }
396 spin_unlock(&pag->pag_ici_lock);
397 radix_tree_preload_end();
398
399 *ipp = ip;
400 return 0;
401
402 out_preload_end:
403 spin_unlock(&pag->pag_ici_lock);
404 radix_tree_preload_end();
405 if (lock_flags)
406 xfs_iunlock(ip, lock_flags);
407 out_destroy:
408 __destroy_inode(VFS_I(ip));
409 xfs_inode_free(ip);
410 return error;
411 }
412
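/*
 * Wait for the XFS_INEW flag to be cleared on an inode that another thread
 * is still initialising.
 */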
413 static void
414 xfs_inew_wait(
415 struct xfs_inode *ip)
416 {
417 wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
418 DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);
419
420 do {
421 prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
422 if (!xfs_iflags_test(ip, XFS_INEW))
423 break;
424 schedule();
425 } while (true);
426 finish_wait(wq, &wait.wait);
427 }
428
429 /*
430 * Look up an inode by number in the given file system.
431 * The inode is looked up in the cache held in each AG.
432 * If the inode is found in the cache, initialise the vfs inode
433 * if necessary.
434 *
435 * If it is not in core, read it in from the file system's device,
436 * add it to the cache and initialise the vfs inode.
437 *
438 * The inode is locked according to the value of the lock_flags parameter.
439 * This flag parameter indicates how and if the inode's IO lock and inode lock
440 * should be taken.
441 *
442 * mp -- the mount point structure for the current file system. It points
443 * to the per-AG inode caches.
444 * tp -- a pointer to the current transaction if there is one. This is
445 * simply passed through to the xfs_iread() call.
446 * ino -- the number of the inode desired. This is the unique identifier
447 * within the file system for the inode being requested.
448 * lock_flags -- flags indicating how to lock the inode. See the comment
449 * for xfs_ilock() for a list of valid values.
450 */
451 int
452 xfs_iget(
453 xfs_mount_t *mp,
454 xfs_trans_t *tp,
455 xfs_ino_t ino,
456 uint flags,
457 uint lock_flags,
458 xfs_inode_t **ipp)
459 {
460 xfs_inode_t *ip;
461 int error;
462 xfs_perag_t *pag;
463 xfs_agino_t agino;
464
465 /*
466 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
467 * doesn't get freed while it's being referenced during a
468 * radix tree traversal here. It assumes this function
469 * acquires only the ILOCK (and therefore it has no need to
470 * involve the IOLOCK in this synchronization).
471 */
472 ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);
473
474 /* reject inode numbers outside existing AGs */
475 if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
476 return -EINVAL;
477
478 XFS_STATS_INC(mp, xs_ig_attempts);
479
480 /* get the perag structure and ensure that it's inode capable */
481 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
482 agino = XFS_INO_TO_AGINO(mp, ino);
483
484 again:
485 error = 0;
486 rcu_read_lock();
487 ip = radix_tree_lookup(&pag->pag_ici_root, agino);
488
489 if (ip) {
490 error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
491 if (error)
492 goto out_error_or_again;
493 } else {
494 rcu_read_unlock();
495 XFS_STATS_INC(mp, xs_ig_missed);
496
497 error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
498 flags, lock_flags);
499 if (error)
500 goto out_error_or_again;
501 }
502 xfs_perag_put(pag);
503
504 *ipp = ip;
505
506 /*
507 * If we have a real type for an on-disk inode, we can set up the inode
508 * now. If it's a new inode being created, xfs_ialloc will handle it.
509 */
510 if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
511 xfs_setup_existing_inode(ip);
512 return 0;
513
514 out_error_or_again:
515 if (error == -EAGAIN) {
516 delay(1);
517 goto again;
518 }
519 xfs_perag_put(pag);
520 return error;
521 }
522
523 /*
524 * The inode lookup is done in batches to keep the amount of lock traffic and
525 * radix tree lookups to a minimum. The batch size is a trade off between
526 * lookup reduction and stack usage. This is in the reclaim path, so we can't
527 * be too greedy.
528 */
529 #define XFS_LOOKUP_BATCH 32
530
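/*
 * Decide whether an inode found during an AG walk can be grabbed for
 * processing. Returns 0 with an igrab reference held, or -ENOENT or
 * -EFSCORRUPTED if the inode should be skipped.
 */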
531 STATIC int
532 xfs_inode_ag_walk_grab(
533 struct xfs_inode *ip,
534 int flags)
535 {
536 struct inode *inode = VFS_I(ip);
537 bool newinos = !!(flags & XFS_AGITER_INEW_WAIT);
538
539 ASSERT(rcu_read_lock_held());
540
541 /*
542 * check for stale RCU freed inode
543 *
544 * If the inode has been reallocated, it doesn't matter if it's not in
545 * the AG we are walking - we are walking for writeback, so if it
546 * passes all the "valid inode" checks and is dirty, then we'll write
547 * it back anyway. If it has been reallocated and is still being
548 * initialised, the XFS_INEW check below will catch it.
549 */
550 spin_lock(&ip->i_flags_lock);
551 if (!ip->i_ino)
552 goto out_unlock_noent;
553
554 /* avoid new or reclaimable inodes. Leave for reclaim code to flush */
555 if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
556 __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
557 goto out_unlock_noent;
558 spin_unlock(&ip->i_flags_lock);
559
560 /* nothing to sync during shutdown */
561 if (XFS_FORCED_SHUTDOWN(ip->i_mount))
562 return -EFSCORRUPTED;
563
564 /* If we can't grab the inode, it must be on its way to reclaim. */
565 if (!igrab(inode))
566 return -ENOENT;
567
568 /* inode is valid */
569 return 0;
570
571 out_unlock_noent:
572 spin_unlock(&ip->i_flags_lock);
573 return -ENOENT;
574 }
575
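/*
 * Walk the in-core inodes of a single AG in batches, running @execute on
 * every inode we can grab; the whole walk restarts if any callback returned
 * -EAGAIN.
 */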
576 STATIC int
577 xfs_inode_ag_walk(
578 struct xfs_mount *mp,
579 struct xfs_perag *pag,
580 int (*execute)(struct xfs_inode *ip, int flags,
581 void *args),
582 int flags,
583 void *args,
584 int tag,
585 int iter_flags)
586 {
587 uint32_t first_index;
588 int last_error = 0;
589 int skipped;
590 int done;
591 int nr_found;
592
593 restart:
594 done = 0;
595 skipped = 0;
596 first_index = 0;
597 nr_found = 0;
598 do {
599 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
600 int error = 0;
601 int i;
602
603 rcu_read_lock();
604
605 if (tag == -1)
606 nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
607 (void **)batch, first_index,
608 XFS_LOOKUP_BATCH);
609 else
610 nr_found = radix_tree_gang_lookup_tag(
611 &pag->pag_ici_root,
612 (void **) batch, first_index,
613 XFS_LOOKUP_BATCH, tag);
614
615 if (!nr_found) {
616 rcu_read_unlock();
617 break;
618 }
619
620 /*
621 * Grab the inodes before we drop the lock. If we found
622 * nothing, nr_found == 0 and the loop will be skipped.
623 */
624 for (i = 0; i < nr_found; i++) {
625 struct xfs_inode *ip = batch[i];
626
627 if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
628 batch[i] = NULL;
629
630 /*
631 * Update the index for the next lookup. Catch
632 * overflows into the next AG range which can occur if
633 * we have inodes in the last block of the AG and we
634 * are currently pointing to the last inode.
635 *
636 * Because we may see inodes that are from the wrong AG
637 * due to RCU freeing and reallocation, only update the
638 * index if it lies in this AG. It was a race that led
639 * us to see this inode, so another lookup from the
640 * same index will not find it again.
641 */
642 if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
643 continue;
644 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
645 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
646 done = 1;
647 }
648
649 /* unlock now we've grabbed the inodes. */
650 rcu_read_unlock();
651
652 for (i = 0; i < nr_found; i++) {
653 if (!batch[i])
654 continue;
655 if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
656 xfs_iflags_test(batch[i], XFS_INEW))
657 xfs_inew_wait(batch[i]);
658 error = execute(batch[i], flags, args);
659 IRELE(batch[i]);
660 if (error == -EAGAIN) {
661 skipped++;
662 continue;
663 }
664 if (error && last_error != -EFSCORRUPTED)
665 last_error = error;
666 }
667
668 /* bail out if the filesystem is corrupted. */
669 if (error == -EFSCORRUPTED)
670 break;
671
672 cond_resched();
673
674 } while (nr_found && !done);
675
676 if (skipped) {
677 delay(1);
678 goto restart;
679 }
680 return last_error;
681 }
682
683 /*
684 * Background scanning to trim post-EOF preallocated space. This is queued
685 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
686 */
687 STATIC void
688 xfs_queue_eofblocks(
689 struct xfs_mount *mp)
690 {
691 rcu_read_lock();
692 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
693 queue_delayed_work(mp->m_eofblocks_workqueue,
694 &mp->m_eofblocks_work,
695 msecs_to_jiffies(xfs_eofb_secs * 1000));
696 rcu_read_unlock();
697 }
698
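/*
 * Background worker that trims post-EOF preallocations across the whole
 * filesystem and then requeues itself.
 */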
699 void
700 xfs_eofblocks_worker(
701 struct work_struct *work)
702 {
703 struct xfs_mount *mp = container_of(to_delayed_work(work),
704 struct xfs_mount, m_eofblocks_work);
705 xfs_icache_free_eofblocks(mp, NULL);
706 xfs_queue_eofblocks(mp);
707 }
708
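/*
 * Run @execute on every grabbable in-core inode in the filesystem, walking
 * the AGs one at a time and honouring the passed-in iterator flags.
 */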
709 int
710 xfs_inode_ag_iterator_flags(
711 struct xfs_mount *mp,
712 int (*execute)(struct xfs_inode *ip, int flags,
713 void *args),
714 int flags,
715 void *args,
716 int iter_flags)
717 {
718 struct xfs_perag *pag;
719 int error = 0;
720 int last_error = 0;
721 xfs_agnumber_t ag;
722
723 ag = 0;
724 while ((pag = xfs_perag_get(mp, ag))) {
725 ag = pag->pag_agno + 1;
726 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
727 iter_flags);
728 xfs_perag_put(pag);
729 if (error) {
730 last_error = error;
731 if (error == -EFSCORRUPTED)
732 break;
733 }
734 }
735 return last_error;
736 }
737
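/* As above, with no special iteration flags. */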
738 int
739 xfs_inode_ag_iterator(
740 struct xfs_mount *mp,
741 int (*execute)(struct xfs_inode *ip, int flags,
742 void *args),
743 int flags,
744 void *args)
745 {
746 return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
747 }
748
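/* As above, but only visit AGs and inodes carrying the given radix tree tag. */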
749 int
750 xfs_inode_ag_iterator_tag(
751 struct xfs_mount *mp,
752 int (*execute)(struct xfs_inode *ip, int flags,
753 void *args),
754 int flags,
755 void *args,
756 int tag)
757 {
758 struct xfs_perag *pag;
759 int error = 0;
760 int last_error = 0;
761 xfs_agnumber_t ag;
762
763 ag = 0;
764 while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
765 ag = pag->pag_agno + 1;
766 error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
767 0);
768 xfs_perag_put(pag);
769 if (error) {
770 last_error = error;
771 if (error == -EFSCORRUPTED)
772 break;
773 }
774 }
775 return last_error;
776 }
777
778 /*
779 * Queue a new inode reclaim pass if there are reclaimable inodes and there
780 * isn't a reclaim pass already in progress. By default it runs every 5s based
781 * on the xfs periodic sync default of 30s. Perhaps this should have its own
782 * tunable, but that can be done if this method proves to be ineffective or too
783 * aggressive.
784 */
785 static void
786 xfs_reclaim_work_queue(
787 struct xfs_mount *mp)
788 {
789
790 rcu_read_lock();
791 if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
792 queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
793 msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
794 }
795 rcu_read_unlock();
796 }
797
798 /*
799 * This is a fast pass over the inode cache to try to get reclaim moving on as
800 * many inodes as possible in a short period of time. It kicks itself every few
801 * seconds, as well as being kicked by the inode cache shrinker when memory
802 * goes low. It scans as quickly as possible avoiding locked inodes or those
803 * already being flushed, and once done schedules a future pass.
804 */
805 void
806 xfs_reclaim_worker(
807 struct work_struct *work)
808 {
809 struct xfs_mount *mp = container_of(to_delayed_work(work),
810 struct xfs_mount, m_reclaim_work);
811
812 xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
813 xfs_reclaim_work_queue(mp);
814 }
815
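/*
 * Mark an inode as reclaimable in the per-AG radix tree. The first
 * reclaimable inode in an AG also propagates the tag up into the per-mount
 * perag tree and kicks the background reclaim worker.
 */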
816 static void
817 __xfs_inode_set_reclaim_tag(
818 struct xfs_perag *pag,
819 struct xfs_inode *ip)
820 {
821 radix_tree_tag_set(&pag->pag_ici_root,
822 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
823 XFS_ICI_RECLAIM_TAG);
824
825 if (!pag->pag_ici_reclaimable) {
826 /* propagate the reclaim tag up into the perag radix tree */
827 spin_lock(&ip->i_mount->m_perag_lock);
828 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
829 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
830 XFS_ICI_RECLAIM_TAG);
831 spin_unlock(&ip->i_mount->m_perag_lock);
832
833 /* schedule periodic background inode reclaim */
834 xfs_reclaim_work_queue(ip->i_mount);
835
836 trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
837 -1, _RET_IP_);
838 }
839 pag->pag_ici_reclaimable++;
840 }
841
842 /*
843 * We set the inode flag atomically with the radix tree tag.
844 * Once we get tag lookups on the radix tree, this inode flag
845 * can go away.
846 */
847 void
848 xfs_inode_set_reclaim_tag(
849 xfs_inode_t *ip)
850 {
851 struct xfs_mount *mp = ip->i_mount;
852 struct xfs_perag *pag;
853
854 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
855 spin_lock(&pag->pag_ici_lock);
856 spin_lock(&ip->i_flags_lock);
857 __xfs_inode_set_reclaim_tag(pag, ip);
858 __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
859 spin_unlock(&ip->i_flags_lock);
860 spin_unlock(&pag->pag_ici_lock);
861 xfs_perag_put(pag);
862 }
863
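/*
 * Account for an inode leaving reclaimable state; clear the perag tree tag
 * when the last reclaimable inode in the AG goes away.
 */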
864 STATIC void
865 __xfs_inode_clear_reclaim(
866 xfs_perag_t *pag,
867 xfs_inode_t *ip)
868 {
869 pag->pag_ici_reclaimable--;
870 if (!pag->pag_ici_reclaimable) {
871 /* clear the reclaim tag from the perag radix tree */
872 spin_lock(&ip->i_mount->m_perag_lock);
873 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
874 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
875 XFS_ICI_RECLAIM_TAG);
876 spin_unlock(&ip->i_mount->m_perag_lock);
877 trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
878 -1, _RET_IP_);
879 }
880 }
881
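/*
 * Clear the reclaim tag for an inode in the per-AG radix tree and update
 * the reclaimable accounting.
 */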
882 STATIC void
883 __xfs_inode_clear_reclaim_tag(
884 xfs_mount_t *mp,
885 xfs_perag_t *pag,
886 xfs_inode_t *ip)
887 {
888 radix_tree_tag_clear(&pag->pag_ici_root,
889 XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
890 __xfs_inode_clear_reclaim(pag, ip);
891 }
892
893 /*
894 * Grab the inode for reclaim exclusively.
895 * Return 0 if we grabbed it, non-zero otherwise.
896 */
897 STATIC int
898 xfs_reclaim_inode_grab(
899 struct xfs_inode *ip,
900 int flags)
901 {
902 ASSERT(rcu_read_lock_held());
903
904 /* quick check for stale RCU freed inode */
905 if (!ip->i_ino)
906 return 1;
907
908 /*
909 * If we are asked for non-blocking operation, do unlocked checks to
910 * see if the inode already is being flushed or in reclaim to avoid
911 * lock traffic.
912 */
913 if ((flags & SYNC_TRYLOCK) &&
914 __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
915 return 1;
916
917 /*
918 * The radix tree lock here protects a thread in xfs_iget from racing
919 * with us starting reclaim on the inode. Once we have the
920 * XFS_IRECLAIM flag set it will not touch us.
921 *
922 * Due to RCU lookup, we may find inodes that have been freed and only
923 * have XFS_IRECLAIM set. Indeed, we may see reallocated inodes that
924 * aren't candidates for reclaim at all, so we must check that
925 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
926 */
927 spin_lock(&ip->i_flags_lock);
928 if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
929 __xfs_iflags_test(ip, XFS_IRECLAIM)) {
930 /* not a reclaim candidate. */
931 spin_unlock(&ip->i_flags_lock);
932 return 1;
933 }
934 __xfs_iflags_set(ip, XFS_IRECLAIM);
935 spin_unlock(&ip->i_flags_lock);
936 return 0;
937 }
938
939 /*
940 * Inodes in different states need to be treated differently. The following
941 * table lists the inode states and the reclaim actions necessary:
942 *
943 * inode state iflush ret required action
944 * --------------- ---------- ---------------
945 * bad - reclaim
946 * shutdown EIO unpin and reclaim
947 * clean, unpinned 0 reclaim
948 * stale, unpinned 0 reclaim
949 * clean, pinned(*) 0 requeue
950 * stale, pinned EAGAIN requeue
951 * dirty, async - requeue
952 * dirty, sync 0 reclaim
953 *
954 * (*) dgc: I don't think the clean, pinned state is possible but it gets
955 * handled anyway given the order of checks implemented.
956 *
957 * Also, because we get the flush lock first, we know that any inode that has
958 * been flushed delwri has had the flush completed by the time we check that
959 * the inode is clean.
960 *
961 * Note that because the inode is flushed delayed write by AIL pushing, the
962 * flush lock may already be held here and waiting on it can result in very
963 * long latencies. Hence for sync reclaims, where we wait on the flush lock,
964 * the caller should push the AIL first before trying to reclaim inodes to
965 * minimise the amount of time spent waiting. For background reclaim, we only
966 * bother to reclaim clean inodes anyway.
967 *
968 * Hence the order of actions after gaining the locks should be:
969 * bad => reclaim
970 * shutdown => unpin and reclaim
971 * pinned, async => requeue
972 * pinned, sync => unpin
973 * stale => reclaim
974 * clean => reclaim
975 * dirty, async => requeue
976 * dirty, sync => flush, wait and reclaim
977 */
978 STATIC int
979 xfs_reclaim_inode(
980 struct xfs_inode *ip,
981 struct xfs_perag *pag,
982 int sync_mode)
983 {
984 struct xfs_buf *bp = NULL;
985 int error;
986
987 restart:
988 error = 0;
989 xfs_ilock(ip, XFS_ILOCK_EXCL);
990 if (!xfs_iflock_nowait(ip)) {
991 if (!(sync_mode & SYNC_WAIT))
992 goto out;
993 xfs_iflock(ip);
994 }
995
996 if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
997 xfs_iunpin_wait(ip);
998 xfs_iflush_abort(ip, false);
999 goto reclaim;
1000 }
1001 if (xfs_ipincount(ip)) {
1002 if (!(sync_mode & SYNC_WAIT))
1003 goto out_ifunlock;
1004 xfs_iunpin_wait(ip);
1005 }
1006 if (xfs_iflags_test(ip, XFS_ISTALE))
1007 goto reclaim;
1008 if (xfs_inode_clean(ip))
1009 goto reclaim;
1010
1011 /*
1012 * Never flush out dirty data during non-blocking reclaim, as it would
1013 * just contend with AIL pushing trying to do the same job.
1014 */
1015 if (!(sync_mode & SYNC_WAIT))
1016 goto out_ifunlock;
1017
1018 /*
1019 * Now we have an inode that needs flushing.
1020 *
1021 * Note that xfs_iflush will never block on the inode buffer lock, as
1022 * xfs_ifree_cluster() can lock the inode buffer before it locks the
1023 * ip->i_lock, and we are doing the exact opposite here. As a result,
1024 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
1025 * result in an ABBA deadlock with xfs_ifree_cluster().
1026 *
1027 * As xfs_ifree_cluster() must gather all inodes that are active in the
1028 * cache to mark them stale, if we hit this case we don't actually want
1029 * to do IO here - we want the inode marked stale so we can simply
1030 * reclaim it. Hence if we get an EAGAIN error here, just unlock the
1031 * inode, back off and try again. Hopefully the next pass through will
1032 * see the stale flag set on the inode.
1033 */
1034 error = xfs_iflush(ip, &bp);
1035 if (error == -EAGAIN) {
1036 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1037 /* backoff longer than in xfs_ifree_cluster */
1038 delay(2);
1039 goto restart;
1040 }
1041
1042 if (!error) {
1043 error = xfs_bwrite(bp);
1044 xfs_buf_relse(bp);
1045 }
1046
1047 xfs_iflock(ip);
1048 reclaim:
1049 xfs_ifunlock(ip);
1050 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1051
1052 XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
1053 /*
1054 * Remove the inode from the per-AG radix tree.
1055 *
1056 * Because radix_tree_delete won't complain even if the item was never
1057 * added to the tree, assert that it has been there before to catch
1058 * problems with the inode lifetime early on.
1059 */
1060 spin_lock(&pag->pag_ici_lock);
1061 if (!radix_tree_delete(&pag->pag_ici_root,
1062 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
1063 ASSERT(0);
1064 __xfs_inode_clear_reclaim(pag, ip);
1065 spin_unlock(&pag->pag_ici_lock);
1066
1067 /*
1068 * Here we do an (almost) spurious inode lock in order to coordinate
1069 * with inode cache radix tree lookups. This is because the lookup
1070 * can reference the inodes in the cache without taking references.
1071 *
1072 * We make that OK here by ensuring that we wait until the inode is
1073 * unlocked after the lookup before we go ahead and free it.
1074 */
1075 xfs_ilock(ip, XFS_ILOCK_EXCL);
1076 xfs_qm_dqdetach(ip);
1077 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1078
1079 xfs_inode_free(ip);
1080 return error;
1081
1082 out_ifunlock:
1083 xfs_ifunlock(ip);
1084 out:
1085 xfs_iflags_clear(ip, XFS_IRECLAIM);
1086 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1087 /*
1088 * We could return -EAGAIN here to make reclaim rescan the inode tree in
1089 * a short while. However, this just burns CPU time scanning the tree
1090 * waiting for IO to complete and the reclaim work never goes back to
1091 * the idle state. Instead, return 0 to let the next scheduled
1092 * background reclaim attempt to reclaim the inode again.
1093 */
1094 return 0;
1095 }
1096
1097 /*
1098 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
1099 * corrupted, we still want to try to reclaim all the inodes. If we don't,
1100 * then a shutdown during the filesystem unmount reclaim walk will leak
1101 * all the unreclaimed inodes.
1102 */
1103 STATIC int
1104 xfs_reclaim_inodes_ag(
1105 struct xfs_mount *mp,
1106 int flags,
1107 int *nr_to_scan)
1108 {
1109 struct xfs_perag *pag;
1110 int error = 0;
1111 int last_error = 0;
1112 xfs_agnumber_t ag;
1113 int trylock = flags & SYNC_TRYLOCK;
1114 int skipped;
1115
1116 restart:
1117 ag = 0;
1118 skipped = 0;
1119 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1120 unsigned long first_index = 0;
1121 int done = 0;
1122 int nr_found = 0;
1123
1124 ag = pag->pag_agno + 1;
1125
1126 if (trylock) {
1127 if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
1128 skipped++;
1129 xfs_perag_put(pag);
1130 continue;
1131 }
1132 first_index = pag->pag_ici_reclaim_cursor;
1133 } else
1134 mutex_lock(&pag->pag_ici_reclaim_lock);
1135
1136 do {
1137 struct xfs_inode *batch[XFS_LOOKUP_BATCH];
1138 int i;
1139
1140 rcu_read_lock();
1141 nr_found = radix_tree_gang_lookup_tag(
1142 &pag->pag_ici_root,
1143 (void **)batch, first_index,
1144 XFS_LOOKUP_BATCH,
1145 XFS_ICI_RECLAIM_TAG);
1146 if (!nr_found) {
1147 done = 1;
1148 rcu_read_unlock();
1149 break;
1150 }
1151
1152 /*
1153 * Grab the inodes before we drop the lock. If we found
1154 * nothing, nr_found == 0 and the loop will be skipped.
1155 */
1156 for (i = 0; i < nr_found; i++) {
1157 struct xfs_inode *ip = batch[i];
1158
1159 if (done || xfs_reclaim_inode_grab(ip, flags))
1160 batch[i] = NULL;
1161
1162 /*
1163 * Update the index for the next lookup. Catch
1164 * overflows into the next AG range which can
1165 * occur if we have inodes in the last block of
1166 * the AG and we are currently pointing to the
1167 * last inode.
1168 *
1169 * Because we may see inodes that are from the
1170 * wrong AG due to RCU freeing and
1171 * reallocation, only update the index if it
1172 * lies in this AG. It was a race that led us
1173 * to see this inode, so another lookup from
1174 * the same index will not find it again.
1175 */
1176 if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
1177 pag->pag_agno)
1178 continue;
1179 first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
1180 if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
1181 done = 1;
1182 }
1183
1184 /* unlock now we've grabbed the inodes. */
1185 rcu_read_unlock();
1186
1187 for (i = 0; i < nr_found; i++) {
1188 if (!batch[i])
1189 continue;
1190 error = xfs_reclaim_inode(batch[i], pag, flags);
1191 if (error && last_error != -EFSCORRUPTED)
1192 last_error = error;
1193 }
1194
1195 *nr_to_scan -= XFS_LOOKUP_BATCH;
1196
1197 cond_resched();
1198
1199 } while (nr_found && !done && *nr_to_scan > 0);
1200
1201 if (trylock && !done)
1202 pag->pag_ici_reclaim_cursor = first_index;
1203 else
1204 pag->pag_ici_reclaim_cursor = 0;
1205 mutex_unlock(&pag->pag_ici_reclaim_lock);
1206 xfs_perag_put(pag);
1207 }
1208
1209 /*
1210 * If we skipped any AG, and we still have scan count remaining, do
1211 * another pass, this time using blocking reclaim semantics (i.e.
1212 * waiting on the reclaim locks and ignoring the reclaim cursors). This
1213 * ensures that when we have more reclaimers than AGs we block rather
1214 * than spin trying to execute reclaim.
1215 */
1216 if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
1217 trylock = 0;
1218 goto restart;
1219 }
1220 return last_error;
1221 }
1222
1223 int
1224 xfs_reclaim_inodes(
1225 xfs_mount_t *mp,
1226 int mode)
1227 {
1228 int nr_to_scan = INT_MAX;
1229
1230 return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
1231 }
1232
1233 /*
1234 * Scan a certain number of inodes for reclaim.
1235 *
1236 * When called we make sure that there is a background (fast) inode reclaim in
1237 * progress, while we throttle the speed of reclaim by doing synchronous
1238 * reclaim of inodes. That means if we come across dirty inodes, we wait for
1239 * them to be cleaned, which we hope will not be very long due to the
1240 * background walker having already kicked the IO off on those dirty inodes.
1241 */
1242 long
1243 xfs_reclaim_inodes_nr(
1244 struct xfs_mount *mp,
1245 int nr_to_scan)
1246 {
1247 /* kick background reclaimer and push the AIL */
1248 xfs_reclaim_work_queue(mp);
1249 xfs_ail_push_all(mp->m_ail);
1250
1251 return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
1252 }
1253
1254 /*
1255 * Return the number of reclaimable inodes in the filesystem for
1256 * the shrinker to determine how much to reclaim.
1257 */
1258 int
1259 xfs_reclaim_inodes_count(
1260 struct xfs_mount *mp)
1261 {
1262 struct xfs_perag *pag;
1263 xfs_agnumber_t ag = 0;
1264 int reclaimable = 0;
1265
1266 while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
1267 ag = pag->pag_agno + 1;
1268 reclaimable += pag->pag_ici_reclaimable;
1269 xfs_perag_put(pag);
1270 }
1271 return reclaimable;
1272 }
1273
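/*
 * Match an inode against the UID/GID/project ID filters in the eofblocks
 * control structure. All supplied criteria must match.
 */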
1274 STATIC int
1275 xfs_inode_match_id(
1276 struct xfs_inode *ip,
1277 struct xfs_eofblocks *eofb)
1278 {
1279 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1280 !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1281 return 0;
1282
1283 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1284 !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1285 return 0;
1286
1287 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1288 xfs_get_projid(ip) != eofb->eof_prid)
1289 return 0;
1290
1291 return 1;
1292 }
1293
1294 /*
1295 * A union-based inode filtering algorithm. Process the inode if any of the
1296 * criteria match. This is for global/internal scans only.
1297 */
1298 STATIC int
1299 xfs_inode_match_id_union(
1300 struct xfs_inode *ip,
1301 struct xfs_eofblocks *eofb)
1302 {
1303 if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
1304 uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
1305 return 1;
1306
1307 if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
1308 gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
1309 return 1;
1310
1311 if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
1312 xfs_get_projid(ip) == eofb->eof_prid)
1313 return 1;
1314
1315 return 0;
1316 }
1317
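/*
 * Per-inode callback for the eofblocks scans: free the post-EOF preallocated
 * blocks of inodes that pass the optional filtering criteria.
 */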
1318 STATIC int
1319 xfs_inode_free_eofblocks(
1320 struct xfs_inode *ip,
1321 int flags,
1322 void *args)
1323 {
1324 int ret;
1325 struct xfs_eofblocks *eofb = args;
1326 bool need_iolock = true;
1327 int match;
1328
1329 ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
1330
1331 if (!xfs_can_free_eofblocks(ip, false)) {
1332 /* inode could be preallocated or append-only */
1333 trace_xfs_inode_free_eofblocks_invalid(ip);
1334 xfs_inode_clear_eofblocks_tag(ip);
1335 return 0;
1336 }
1337
1338 /*
1339 * If the mapping is dirty the operation can block and wait for some
1340 * time. Unless we are waiting, skip it.
1341 */
1342 if (!(flags & SYNC_WAIT) &&
1343 mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
1344 return 0;
1345
1346 if (eofb) {
1347 if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
1348 match = xfs_inode_match_id_union(ip, eofb);
1349 else
1350 match = xfs_inode_match_id(ip, eofb);
1351 if (!match)
1352 return 0;
1353
1354 /* skip the inode if the file size is too small */
1355 if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
1356 XFS_ISIZE(ip) < eofb->eof_min_file_size)
1357 return 0;
1358
1359 /*
1360 * A scan owner implies we already hold the iolock. Skip it in
1361 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
1362 * the possibility of EAGAIN being returned.
1363 */
1364 if (eofb->eof_scan_owner == ip->i_ino)
1365 need_iolock = false;
1366 }
1367
1368 ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);
1369
1370 /* don't revisit the inode if we're not waiting */
1371 if (ret == -EAGAIN && !(flags & SYNC_WAIT))
1372 ret = 0;
1373
1374 return ret;
1375 }
1376
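/*
 * Walk all inodes tagged for post-EOF block trimming and free their
 * speculative preallocations, waiting for each inode if XFS_EOF_FLAGS_SYNC
 * is set.
 */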
1377 int
1378 xfs_icache_free_eofblocks(
1379 struct xfs_mount *mp,
1380 struct xfs_eofblocks *eofb)
1381 {
1382 int flags = SYNC_TRYLOCK;
1383
1384 if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
1385 flags = SYNC_WAIT;
1386
1387 return xfs_inode_ag_iterator_tag(mp, xfs_inode_free_eofblocks, flags,
1388 eofb, XFS_ICI_EOFBLOCKS_TAG);
1389 }
1390
1391 /*
1392 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
1393 * multiple quotas, we don't know exactly which quota caused an allocation
1394 * failure. We make a best effort by including each quota under low free space
1395 * conditions (less than 1% free space) in the scan.
1396 */
1397 int
1398 xfs_inode_free_quota_eofblocks(
1399 struct xfs_inode *ip)
1400 {
1401 int scan = 0;
1402 struct xfs_eofblocks eofb = {0};
1403 struct xfs_dquot *dq;
1404
1405 ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
1406
1407 /*
1408 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
1409 * can repeatedly trylock on the inode we're currently processing. We
1410 * run a sync scan to increase effectiveness and use the union filter to
1411 * cover all applicable quotas in a single scan.
1412 */
1413 eofb.eof_scan_owner = ip->i_ino;
1414 eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
1415
1416 if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
1417 dq = xfs_inode_dquot(ip, XFS_DQ_USER);
1418 if (dq && xfs_dquot_lowsp(dq)) {
1419 eofb.eof_uid = VFS_I(ip)->i_uid;
1420 eofb.eof_flags |= XFS_EOF_FLAGS_UID;
1421 scan = 1;
1422 }
1423 }
1424
1425 if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
1426 dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
1427 if (dq && xfs_dquot_lowsp(dq)) {
1428 eofb.eof_gid = VFS_I(ip)->i_gid;
1429 eofb.eof_flags |= XFS_EOF_FLAGS_GID;
1430 scan = 1;
1431 }
1432 }
1433
1434 if (scan)
1435 xfs_icache_free_eofblocks(ip->i_mount, &eofb);
1436
1437 return scan;
1438 }
1439
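/*
 * Tag an inode as having post-EOF blocks to trim. The first tagged inode in
 * an AG propagates the tag up into the per-mount perag tree and kicks off
 * the background trimming worker.
 */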
1440 void
1441 xfs_inode_set_eofblocks_tag(
1442 xfs_inode_t *ip)
1443 {
1444 struct xfs_mount *mp = ip->i_mount;
1445 struct xfs_perag *pag;
1446 int tagged;
1447
1448 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1449 spin_lock(&pag->pag_ici_lock);
1450 trace_xfs_inode_set_eofblocks_tag(ip);
1451
1452 tagged = radix_tree_tagged(&pag->pag_ici_root,
1453 XFS_ICI_EOFBLOCKS_TAG);
1454 radix_tree_tag_set(&pag->pag_ici_root,
1455 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1456 XFS_ICI_EOFBLOCKS_TAG);
1457 if (!tagged) {
1458 /* propagate the eofblocks tag up into the perag radix tree */
1459 spin_lock(&ip->i_mount->m_perag_lock);
1460 radix_tree_tag_set(&ip->i_mount->m_perag_tree,
1461 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1462 XFS_ICI_EOFBLOCKS_TAG);
1463 spin_unlock(&ip->i_mount->m_perag_lock);
1464
1465 /* kick off background trimming */
1466 xfs_queue_eofblocks(ip->i_mount);
1467
1468 trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
1469 -1, _RET_IP_);
1470 }
1471
1472 spin_unlock(&pag->pag_ici_lock);
1473 xfs_perag_put(pag);
1474 }
1475
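/*
 * Clear the eofblocks tag for an inode, dropping the perag tree tag when no
 * tagged inodes remain in the AG.
 */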
1476 void
1477 xfs_inode_clear_eofblocks_tag(
1478 xfs_inode_t *ip)
1479 {
1480 struct xfs_mount *mp = ip->i_mount;
1481 struct xfs_perag *pag;
1482
1483 pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
1484 spin_lock(&pag->pag_ici_lock);
1485 trace_xfs_inode_clear_eofblocks_tag(ip);
1486
1487 radix_tree_tag_clear(&pag->pag_ici_root,
1488 XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
1489 XFS_ICI_EOFBLOCKS_TAG);
1490 if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
1491 /* clear the eofblocks tag from the perag radix tree */
1492 spin_lock(&ip->i_mount->m_perag_lock);
1493 radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
1494 XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
1495 XFS_ICI_EOFBLOCKS_TAG);
1496 spin_unlock(&ip->i_mount->m_perag_lock);
1497 trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
1498 -1, _RET_IP_);
1499 }
1500
1501 spin_unlock(&pag->pag_ici_lock);
1502 xfs_perag_put(pag);
1503 }
1504
1505