/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_bmap_util.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"

#include <linux/kthread.h>
#include <linux/freezer.h>

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}
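
/*
 * Editorial sketch (not part of the original file): the VFS inode is
 * embedded inside the XFS inode, so VFS_I() and XFS_I() are cheap,
 * constant-time conversions between the two views of the same object
 * rather than lookups. A caller holding a struct inode can reach the
 * XFS-specific state like so:
 *
 *	struct inode		*inode = ...;		// from the VFS
 *	struct xfs_inode	*ip = XFS_I(inode);	// containing inode
 *	struct xfs_mount	*mp = ip->i_mount;	// per-mount state
 *
 *	ASSERT(VFS_I(ip) == inode);			// round-trips
 */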

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}

static void
__xfs_inode_free(
	struct xfs_inode	*ip)
{
	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	XFS_STATS_DEC(ip->i_mount, vn_active);

	call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always
	 * appears to be reclaimed with an invalid inode number when in the
	 * free state. The ip->i_flags_lock provides the barrier against lookup
	 * races.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	__xfs_inode_free(ip);
}

/*
 * Queue a new inode reclaim pass if there are reclaimable inodes and there
 * isn't a reclaim pass already in progress. By default it runs every 5s based
 * on the xfs periodic sync default of 30s. Perhaps this should have its own
 * tunable, but that can be done if this method proves to be ineffective or too
 * aggressive.
 */
static void
xfs_reclaim_work_queue(
	struct xfs_mount        *mp)
{

	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(mp->m_reclaim_workqueue, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}
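
/*
 * Editorial note: a worked example of the interval computed above,
 * assuming the default xfs_syncd_centisecs of 3000 (the 30s periodic
 * sync mentioned in the comment before xfs_reclaim_work_queue()):
 *
 *	3000 / 6 * 10 = 5000 ms = 5 seconds
 *
 * so the background reclaim pass requeues itself at one sixth of the
 * periodic sync interval.
 */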

/*
 * This is a fast pass over the inode cache to try to get reclaim moving on as
 * many inodes as possible in a short period of time. It kicks itself every few
 * seconds, as well as being kicked by the inode cache shrinker when memory
 * goes low. It scans as quickly as possible avoiding locked inodes or those
 * already being flushed, and once done schedules a future pass.
 */
void
xfs_reclaim_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_reclaim_work_queue(mp);
}

static void
xfs_perag_set_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (pag->pag_ici_reclaimable++)
		return;

	/* propagate the reclaim tag up into the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_set(&mp->m_perag_tree, pag->pag_agno,
			   XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);

	/* schedule periodic background inode reclaim */
	xfs_reclaim_work_queue(mp);

	trace_xfs_perag_set_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}

static void
xfs_perag_clear_reclaim_tag(
	struct xfs_perag	*pag)
{
	struct xfs_mount	*mp = pag->pag_mount;

	lockdep_assert_held(&pag->pag_ici_lock);
	if (--pag->pag_ici_reclaimable)
		return;

	/* clear the reclaim tag from the perag radix tree */
	spin_lock(&mp->m_perag_lock);
	radix_tree_tag_clear(&mp->m_perag_tree, pag->pag_agno,
			     XFS_ICI_RECLAIM_TAG);
	spin_unlock(&mp->m_perag_lock);
	trace_xfs_perag_clear_reclaim(mp, pag->pag_agno, -1, _RET_IP_);
}
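
/*
 * Editorial note: pag->pag_ici_reclaimable acts as a reference count on
 * the per-AG reclaim tag. xfs_perag_set_reclaim_tag() propagates the tag
 * into the perag radix tree only on the 0 -> 1 transition (the
 * post-increment test returns early otherwise), and
 * xfs_perag_clear_reclaim_tag() clears it only on the 1 -> 0 transition,
 * so the mount-wide tag is set exactly when the AG holds at least one
 * reclaimable inode.
 */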


/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);

	radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);
	xfs_perag_set_reclaim_tag(pag);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);

	spin_unlock(&ip->i_flags_lock);
	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
xfs_inode_clear_reclaim_tag(
	struct xfs_perag	*pag,
	xfs_ino_t		ino)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(pag->pag_mount, ino),
			     XFS_ICI_RECLAIM_TAG);
	xfs_perag_clear_reclaim_tag(pag);
}

static void
xfs_inew_wait(
	struct xfs_inode	*ip)
{
	wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_INEW_BIT);
	DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_INEW_BIT);

	do {
		prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
		if (!xfs_iflags_test(ip, XFS_INEW))
			break;
		schedule();
	} while (true);
	finish_wait(wq, &wait.wait);
}
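
/*
 * Editorial sketch (not part of the original file): xfs_inew_wait() is
 * the sleeping half of a wait-on-bit handshake. The waking half appears
 * later in this file wherever XFS_INEW is cleared, along the lines of:
 *
 *	ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
 *	wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
 *
 * The prepare_to_wait()/finish_wait() pairing above re-tests the flag
 * after queueing the waiter, so a wakeup arriving between the test and
 * the schedule() cannot be missed.
 */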

/*
 * When we recycle a reclaimable inode, we need to re-initialise the VFS inode
 * part of the structure. This is made more complex by the fact we store
 * information about the on-disk values in the VFS inode and so we can't just
 * overwrite the values unconditionally. Hence we save the parameters we
 * need to retain across reinitialisation, and rewrite them into the VFS inode
 * after reinitialisation even if it fails.
 */
static int
xfs_reinit_inode(
	struct xfs_mount	*mp,
	struct inode		*inode)
{
	int		error;
	uint32_t	nlink = inode->i_nlink;
	uint32_t	generation = inode->i_generation;
	uint64_t	version = inode->i_version;
	umode_t		mode = inode->i_mode;

	error = inode_init_always(mp->m_super, inode);

	set_nlink(inode, nlink);
	inode->i_generation = generation;
	inode->i_version = version;
	inode->i_mode = mode;
	return error;
}

/*
 * Check the validity of the inode we just found in the cache.
 */
static int
xfs_iget_cache_hit(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip,
	xfs_ino_t		ino,
	int			flags,
	int			lock_flags) __releases(RCU)
{
	struct inode		*inode = VFS_I(ip);
	struct xfs_mount	*mp = ip->i_mount;
	int			error;

	/*
	 * check for re-use of an inode within an RCU grace period due to the
	 * radix tree nodes not being updated yet. We monitor for this by
	 * setting the inode number to zero before freeing the inode structure.
	 * If the inode has been reallocated and set up, then the inode number
	 * will not match, so check for that, too.
	 */
	spin_lock(&ip->i_flags_lock);
	if (ip->i_ino != ino) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}


	/*
	 * If we are racing with another cache hit that is currently
	 * instantiating this inode or currently recycling it out of
	 * reclaimable state, wait for the initialisation to complete
	 * before continuing.
	 *
	 * XXX(hch): eventually we should do something equivalent to
	 *	     wait_on_inode to wait for these flags to be cleared
	 *	     instead of polling for it.
	 */
	if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) {
		trace_xfs_iget_skip(ip);
		XFS_STATS_INC(mp, xs_ig_frecycle);
		error = -EAGAIN;
		goto out_error;
	}

	/*
	 * If lookup is racing with unlink return an error immediately.
	 */
	if (VFS_I(ip)->i_mode == 0 && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_error;
	}

	/*
	 * If IRECLAIMABLE is set, we've torn down the VFS inode already.
	 * Need to carefully get it back into usable state.
	 */
	if (ip->i_flags & XFS_IRECLAIMABLE) {
		trace_xfs_iget_reclaim(ip);

		/*
		 * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode
		 * from stomping over us while we recycle the inode.  We can't
		 * clear the radix tree reclaimable tag yet as it requires
		 * pag_ici_lock to be held exclusive.
		 */
		ip->i_flags |= XFS_IRECLAIM;

		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();

		error = xfs_reinit_inode(mp, inode);
		if (error) {
			bool wake;
			/*
			 * Re-initializing the inode failed, and we are in deep
			 * trouble.  Try to re-add it to the reclaim list.
			 */
			rcu_read_lock();
			spin_lock(&ip->i_flags_lock);
			wake = !!__xfs_iflags_test(ip, XFS_INEW);
			ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM);
			if (wake)
				wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
			ASSERT(ip->i_flags & XFS_IRECLAIMABLE);
			trace_xfs_iget_reclaim_fail(ip);
			goto out_error;
		}

		spin_lock(&pag->pag_ici_lock);
		spin_lock(&ip->i_flags_lock);

		/*
		 * Clear the per-lifetime state in the inode as we are now
		 * effectively a new inode and need to return to the initial
		 * state before reuse occurs.
		 */
		ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
		ip->i_flags |= XFS_INEW;
		xfs_inode_clear_reclaim_tag(pag, ip->i_ino);
		inode->i_state = I_NEW;

		ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock));
		mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

		spin_unlock(&ip->i_flags_lock);
		spin_unlock(&pag->pag_ici_lock);
	} else {
		/* If the VFS inode is being torn down, pause and try again. */
		if (!igrab(inode)) {
			trace_xfs_iget_skip(ip);
			error = -EAGAIN;
			goto out_error;
		}

		/* We've got a live one. */
		spin_unlock(&ip->i_flags_lock);
		rcu_read_unlock();
		trace_xfs_iget_hit(ip);
	}

	if (lock_flags != 0)
		xfs_ilock(ip, lock_flags);

	xfs_iflags_clear(ip, XFS_ISTALE | XFS_IDONTCACHE);
	XFS_STATS_INC(mp, xs_ig_found);

	return 0;

out_error:
	spin_unlock(&ip->i_flags_lock);
	rcu_read_unlock();
	return error;
}


static int
xfs_iget_cache_miss(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	struct xfs_inode	**ipp,
	int			flags,
	int			lock_flags)
{
	struct xfs_inode	*ip;
	int			error;
	xfs_agino_t		agino = XFS_INO_TO_AGINO(mp, ino);
	int			iflags;

	ip = xfs_inode_alloc(mp, ino);
	if (!ip)
		return -ENOMEM;

	error = xfs_iread(mp, tp, ip, flags);
	if (error)
		goto out_destroy;

	trace_xfs_iget_miss(ip);

	if ((VFS_I(ip)->i_mode == 0) && !(flags & XFS_IGET_CREATE)) {
		error = -ENOENT;
		goto out_destroy;
	}

	/*
	 * Preload the radix tree so we can insert safely under the
	 * write spinlock. Note that we cannot sleep inside the preload
	 * region. Since we can be called from transaction context, don't
	 * recurse into the file system.
	 */
	if (radix_tree_preload(GFP_NOFS)) {
		error = -EAGAIN;
		goto out_destroy;
	}

	/*
	 * Because the inode hasn't been added to the radix-tree yet it can't
	 * be found by another thread, so we can do the non-sleeping lock here.
	 */
	if (lock_flags) {
		if (!xfs_ilock_nowait(ip, lock_flags))
			BUG();
	}

	/*
	 * These values must be set before inserting the inode into the radix
	 * tree as the moment it is inserted a concurrent lookup (allowed by the
	 * RCU locking mechanism) can find it and that lookup must see that this
	 * is an inode currently under construction (i.e. that XFS_INEW is set).
	 * The ip->i_flags_lock that protects the XFS_INEW flag forms the
	 * memory barrier that ensures this detection works correctly at lookup
	 * time.
	 */
	iflags = XFS_INEW;
	if (flags & XFS_IGET_DONTCACHE)
		iflags |= XFS_IDONTCACHE;
	ip->i_udquot = NULL;
	ip->i_gdquot = NULL;
	ip->i_pdquot = NULL;
	xfs_iflags_set(ip, iflags);

	/* insert the new inode */
	spin_lock(&pag->pag_ici_lock);
	error = radix_tree_insert(&pag->pag_ici_root, agino, ip);
	if (unlikely(error)) {
		WARN_ON(error != -EEXIST);
		XFS_STATS_INC(mp, xs_ig_dup);
		error = -EAGAIN;
		goto out_preload_end;
	}
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();

	*ipp = ip;
	return 0;

out_preload_end:
	spin_unlock(&pag->pag_ici_lock);
	radix_tree_preload_end();
	if (lock_flags)
		xfs_iunlock(ip, lock_flags);
out_destroy:
	__destroy_inode(VFS_I(ip));
	xfs_inode_free(ip);
	return error;
}

/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *		 for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		flags,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	xfs_inode_t	*ip;
	int		error;
	xfs_perag_t	*pag;
	xfs_agino_t	agino;

	/*
	 * xfs_reclaim_inode() uses the ILOCK to ensure an inode
	 * doesn't get freed while it's being referenced during a
	 * radix tree traversal here.  It assumes this function
	 * acquires only the ILOCK (and therefore it has no need to
	 * involve the IOLOCK in this synchronization).
	 */
	ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

	/* reject inode numbers outside existing AGs */
	if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
		return -EINVAL;

	XFS_STATS_INC(mp, xs_ig_attempts);

	/* get the perag structure and ensure that it's inode capable */
	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
	agino = XFS_INO_TO_AGINO(mp, ino);

again:
	error = 0;
	rcu_read_lock();
	ip = radix_tree_lookup(&pag->pag_ici_root, agino);

	if (ip) {
		error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
		if (error)
			goto out_error_or_again;
	} else {
		rcu_read_unlock();
		XFS_STATS_INC(mp, xs_ig_missed);

		error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
							flags, lock_flags);
		if (error)
			goto out_error_or_again;
	}
	xfs_perag_put(pag);

	*ipp = ip;

	/*
	 * If we have a real type for an on-disk inode, we can setup the inode
	 * now.	 If it's a new inode being created, xfs_ialloc will handle it.
	 */
	if (xfs_iflags_test(ip, XFS_INEW) && VFS_I(ip)->i_mode != 0)
		xfs_setup_existing_inode(ip);
	return 0;

out_error_or_again:
	if (error == -EAGAIN) {
		delay(1);
		goto again;
	}
	xfs_perag_put(pag);
	return error;
}
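
/*
 * Editorial sketch (not part of the original file): a minimal caller of
 * xfs_iget() outside transaction context, dropping the reference with
 * IRELE() when done. XFS_IGET_UNTRUSTED is what callers pass for inode
 * numbers that came from userspace (e.g. bulkstat); a kernel-internal
 * lookup of a known-good inode number would pass 0 for flags.
 *
 *	struct xfs_inode	*ip;
 *	int			error;
 *
 *	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
 *	if (error)
 *		return error;
 *	// ... use ip, taking XFS_ILOCK_* / XFS_IOLOCK_* as needed ...
 *	IRELE(ip);
 */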

/*
 * The inode lookup is done in batches to keep the amount of lock traffic and
 * radix tree lookups to a minimum. The batch size is a trade off between
 * lookup reduction and stack usage. This is in the reclaim path, so we can't
 * be too greedy.
 */
#define XFS_LOOKUP_BATCH	32

STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	bool			newinos = !!(flags & XFS_AGITER_INEW_WAIT);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and is still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if ((!newinos && __xfs_iflags_test(ip, XFS_INEW)) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return -EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return -ENOENT;

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return -ENOENT;
}

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag,
	int			iter_flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();

		if (tag == -1)
			nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		else
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **) batch, first_index,
					XFS_LOOKUP_BATCH, tag);

		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip, iter_flags))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			if ((iter_flags & XFS_AGITER_INEW_WAIT) &&
			    xfs_iflags_test(batch[i], XFS_INEW))
				xfs_inew_wait(batch[i]);
			error = execute(batch[i], flags, args);
			IRELE(batch[i]);
			if (error == -EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != -EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == -EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Background scanning to trim post-EOF preallocated space. This is queued
 * based on the 'speculative_prealloc_lifetime' tunable (5m by default).
 */
void
xfs_queue_eofblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_EOFBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_eofblocks_work,
				   msecs_to_jiffies(xfs_eofb_secs * 1000));
	rcu_read_unlock();
}
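
/*
 * Editorial note: with the default 'speculative_prealloc_lifetime'
 * (xfs_eofb_secs) of 300 seconds, the delay computed above is
 * msecs_to_jiffies(300 * 1000), i.e. the 5 minutes quoted in the
 * comment before xfs_queue_eofblocks().
 */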

void
xfs_eofblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_eofblocks_work);
	xfs_icache_free_eofblocks(mp, NULL);
	xfs_queue_eofblocks(mp);
}

/*
 * Background scanning to trim preallocated CoW space. This is queued
 * based on the 'speculative_cow_prealloc_lifetime' tunable (5m by default).
 * (We'll just piggyback on the post-EOF prealloc space workqueue.)
 */
STATIC void
xfs_queue_cowblocks(
	struct xfs_mount *mp)
{
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_COWBLOCKS_TAG))
		queue_delayed_work(mp->m_eofblocks_workqueue,
				   &mp->m_cowblocks_work,
				   msecs_to_jiffies(xfs_cowb_secs * 1000));
	rcu_read_unlock();
}

void
xfs_cowblocks_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
				struct xfs_mount, m_cowblocks_work);
	xfs_icache_free_cowblocks(mp, NULL);
	xfs_queue_cowblocks(mp);
}

int
xfs_inode_ag_iterator_flags(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			iter_flags)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get(mp, ag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, -1,
					  iter_flags);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args)
{
	return xfs_inode_ag_iterator_flags(mp, execute, flags, args, 0);
}
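
/*
 * Editorial sketch (not part of the original file): the execute callback
 * handed to xfs_inode_ag_iterator() is invoked once per grabbed inode
 * with the caller's flags and opaque args pointer; a non-zero return is
 * folded into last_error. A hypothetical callback that merely counts
 * cached inodes would look like:
 *
 *	STATIC int
 *	xfs_count_inode(
 *		struct xfs_inode	*ip,
 *		int			flags,
 *		void			*args)
 *	{
 *		(*(uint64_t *)args)++;
 *		return 0;
 *	}
 *
 *	uint64_t	count = 0;
 *
 *	error = xfs_inode_ag_iterator(mp, xfs_count_inode, 0, &count);
 */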

int
xfs_inode_ag_iterator_tag(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			flags,
	void			*args,
	int			tag)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;

	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
		ag = pag->pag_agno + 1;
		error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag,
					  0);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == -EFSCORRUPTED)
				break;
		}
	}
	return last_error;
}

/*
 * Grab the inode for reclaim exclusively.
 * Return 0 if we grabbed it, non-zero otherwise.
 */
STATIC int
xfs_reclaim_inode_grab(
	struct xfs_inode	*ip,
	int			flags)
{
	ASSERT(rcu_read_lock_held());

	/* quick check for stale RCU freed inode */
	if (!ip->i_ino)
		return 1;

	/*
	 * If we are asked for non-blocking operation, do unlocked checks to
	 * see if the inode already is being flushed or in reclaim to avoid
	 * lock traffic.
	 */
	if ((flags & SYNC_TRYLOCK) &&
	    __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
		return 1;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 *
	 * Due to RCU lookup, we may find inodes that have been freed and only
	 * have XFS_IRECLAIM set.  Indeed, we may see reallocated inodes that
	 * aren't candidates for reclaim at all, so we must check that
	 * XFS_IRECLAIMABLE is set first before proceeding to reclaim.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!__xfs_iflags_test(ip, XFS_IRECLAIMABLE) ||
	    __xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* not a reclaim candidate. */
		spin_unlock(&ip->i_flags_lock);
		return 1;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	return 0;
}

/*
 * Inodes in different states need to be treated differently. The following
 * table lists the inode states and the reclaim actions necessary:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, async		-		requeue
 *	dirty, sync		0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean.
 *
 * Note that because the inode is flushed delayed write by AIL pushing, the
 * flush lock may already be held here and waiting on it can result in very
 * long latencies.  Hence for sync reclaims, where we wait on the flush lock,
 * the caller should push the AIL first before trying to reclaim inodes to
 * minimise the amount of time spent waiting.  For background reclaim, we only
 * bother to reclaim clean inodes anyway.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, async	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, async	=> requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	struct xfs_buf		*bp = NULL;
	xfs_ino_t		ino = ip->i_ino; /* for radix_tree_delete */
	int			error;

restart:
	error = 0;
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		/* xfs_iflush_abort() drops the flush lock */
		xfs_iflush_abort(ip, false);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out_ifunlock;
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE) || xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto reclaim;
	}

	/*
	 * Never flush out dirty data during non-blocking reclaim, as it would
	 * just contend with AIL pushing trying to do the same job.
	 */
	if (!(sync_mode & SYNC_WAIT))
		goto out_ifunlock;

	/*
	 * Now we have an inode that needs flushing.
	 *
	 * Note that xfs_iflush will never block on the inode buffer lock, as
	 * xfs_ifree_cluster() can lock the inode buffer before it locks the
	 * ip->i_lock, and we are doing the exact opposite here.  As a result,
	 * doing a blocking xfs_imap_to_bp() to get the cluster buffer would
	 * result in an ABBA deadlock with xfs_ifree_cluster().
	 *
	 * As xfs_ifree_cluster() must gather all inodes that are active in the
	 * cache to mark them stale, if we hit this case we don't actually want
	 * to do IO here - we want the inode marked stale so we can simply
	 * reclaim it.  Hence if we get an EAGAIN error here, just unlock the
	 * inode, back off and try again.  Hopefully the next pass through will
	 * see the stale flag set on the inode.
	 */
	error = xfs_iflush(ip, &bp);
	if (error == -EAGAIN) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		/* backoff longer than in xfs_ifree_cluster */
		delay(2);
		goto restart;
	}

	if (!error) {
		error = xfs_bwrite(bp);
		xfs_buf_relse(bp);
	}

reclaim:
	ASSERT(!xfs_isiflocked(ip));

	/*
	 * Because we use RCU freeing we need to ensure the inode always appears
	 * to be reclaimed with an invalid inode number when in the free state.
	 * We do this as early as possible under the ILOCK so that
	 * xfs_iflush_cluster() and xfs_ifree_cluster() can be guaranteed to
	 * detect races with us here. By doing this, we guarantee that once
	 * xfs_iflush_cluster() or xfs_ifree_cluster() has locked XFS_ILOCK that
	 * it will see either a valid inode that will serialise correctly, or it
	 * will see an invalid inode that it can skip.
	 */
	spin_lock(&ip->i_flags_lock);
	ip->i_flags = XFS_IRECLAIM;
	ip->i_ino = 0;
	spin_unlock(&ip->i_flags_lock);

	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(ip->i_mount, xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it has been there before to catch
	 * problems with the inode life time early on.
	 */
	spin_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ino)))
		ASSERT(0);
	xfs_perag_clear_reclaim_tag(pag);
	spin_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	__xfs_inode_free(ip);
	return error;

out_ifunlock:
	xfs_ifunlock(ip);
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return -EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and the reclaim work never goes back to
	 * the idle state. Instead, return 0 to let the next scheduled
	 * background reclaim attempt to reclaim the inode again.
	 */
	return 0;
}

/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
STATIC int
xfs_reclaim_inodes_ag(
	struct xfs_mount	*mp,
	int			flags,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			trylock = flags & SYNC_TRYLOCK;
	int			skipped;

restart:
	ag = 0;
	skipped = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		unsigned long	first_index = 0;
		int		done = 0;
		int		nr_found = 0;

		ag = pag->pag_agno + 1;

		if (trylock) {
			if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
				skipped++;
				xfs_perag_put(pag);
				continue;
			}
			first_index = pag->pag_ici_reclaim_cursor;
		} else
			mutex_lock(&pag->pag_ici_reclaim_lock);

		do {
			struct xfs_inode *batch[XFS_LOOKUP_BATCH];
			int	i;

			rcu_read_lock();
			nr_found = radix_tree_gang_lookup_tag(
					&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH,
					XFS_ICI_RECLAIM_TAG);
			if (!nr_found) {
				done = 1;
				rcu_read_unlock();
				break;
			}

			/*
			 * Grab the inodes before we drop the lock. If we found
			 * nothing, nr == 0 and the loop will be skipped.
			 */
			for (i = 0; i < nr_found; i++) {
				struct xfs_inode *ip = batch[i];

				if (done || xfs_reclaim_inode_grab(ip, flags))
					batch[i] = NULL;

				/*
				 * Update the index for the next lookup. Catch
				 * overflows into the next AG range which can
				 * occur if we have inodes in the last block of
				 * the AG and we are currently pointing to the
				 * last inode.
				 *
				 * Because we may see inodes that are from the
				 * wrong AG due to RCU freeing and
				 * reallocation, only update the index if it
				 * lies in this AG. It was a race that led us
				 * to see this inode, so another lookup from
				 * the same index will not find it again.
				 */
				if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
								pag->pag_agno)
					continue;
				first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
				if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
					done = 1;
			}

			/* unlock now we've grabbed the inodes. */
			rcu_read_unlock();

			for (i = 0; i < nr_found; i++) {
				if (!batch[i])
					continue;
				error = xfs_reclaim_inode(batch[i], pag, flags);
				if (error && last_error != -EFSCORRUPTED)
					last_error = error;
			}

			*nr_to_scan -= XFS_LOOKUP_BATCH;

			cond_resched();

		} while (nr_found && !done && *nr_to_scan > 0);

		if (trylock && !done)
			pag->pag_ici_reclaim_cursor = first_index;
		else
			pag->pag_ici_reclaim_cursor = 0;
		mutex_unlock(&pag->pag_ici_reclaim_lock);
		xfs_perag_put(pag);
	}

	/*
	 * If we skipped any AG, and we still have scan count remaining, do
	 * another pass this time using blocking reclaim semantics (i.e.
	 * waiting on the reclaim locks and ignoring the reclaim cursors). This
	 * ensures that when we get more reclaimers than AGs we block rather
	 * than spin trying to execute reclaim.
	 */
	if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
		trylock = 0;
		goto restart;
	}
	return last_error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	int		nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}

/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
long
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_reclaim_work_queue(mp);
	xfs_ail_push_all(mp->m_ail);

	return xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
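
/*
 * Editorial note: xfs_reclaim_inodes_nr() is the superblock shrinker
 * entry point. In kernels of this vintage the caller in xfs_super.c is
 * roughly (sketch, from memory):
 *
 *	static long
 *	xfs_fs_free_cached_objects(
 *		struct super_block	*sb,
 *		struct shrink_control	*sc)
 *	{
 *		return xfs_reclaim_inodes_nr(XFS_M(sb), sc->nr_to_scan);
 *	}
 *
 * so nr_to_scan comes from the memory-reclaim core and bounds the walk.
 */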

/*
 * Return the number of reclaimable inodes in the filesystem for
 * the shrinker to determine how much to reclaim.
 */
int
xfs_reclaim_inodes_count(
	struct xfs_mount	*mp)
{
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag = 0;
	int			reclaimable = 0;

	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

STATIC int
xfs_inode_match_id(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    !uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    !gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 0;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) != eofb->eof_prid)
		return 0;

	return 1;
}

/*
 * A union-based inode filtering algorithm. Process the inode if any of the
 * criteria match. This is for global/internal scans only.
 */
STATIC int
xfs_inode_match_id_union(
	struct xfs_inode	*ip,
	struct xfs_eofblocks	*eofb)
{
	if ((eofb->eof_flags & XFS_EOF_FLAGS_UID) &&
	    uid_eq(VFS_I(ip)->i_uid, eofb->eof_uid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_GID) &&
	    gid_eq(VFS_I(ip)->i_gid, eofb->eof_gid))
		return 1;

	if ((eofb->eof_flags & XFS_EOF_FLAGS_PRID) &&
	    xfs_get_projid(ip) == eofb->eof_prid)
		return 1;

	return 0;
}
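
/*
 * Editorial sketch (not part of the original file): a scan that uses the
 * union filter to match inodes owned by a given kuid_t uid OR kgid_t
 * gid, restricted to files of at least 64k, would fill in struct
 * xfs_eofblocks like this ('uid' and 'gid' are assumed inputs):
 *
 *	struct xfs_eofblocks	eofb = { 0 };
 *
 *	eofb.eof_flags = XFS_EOF_FLAGS_UNION | XFS_EOF_FLAGS_UID |
 *			 XFS_EOF_FLAGS_GID | XFS_EOF_FLAGS_MINFILESIZE;
 *	eofb.eof_uid = uid;
 *	eofb.eof_gid = gid;
 *	eofb.eof_min_file_size = 65536;
 *	error = xfs_icache_free_eofblocks(mp, &eofb);
 *
 * __xfs_inode_free_quota_eofblocks() below builds this kind of filter
 * for low-quota conditions.
 */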

STATIC int
xfs_inode_free_eofblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret = 0;
	struct xfs_eofblocks *eofb = args;
	int match;

	if (!xfs_can_free_eofblocks(ip, false)) {
		/* inode could be preallocated or append-only */
		trace_xfs_inode_free_eofblocks_invalid(ip);
		xfs_inode_clear_eofblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty the operation can block and wait for some
	 * time. Unless we are waiting, skip it.
	 */
	if (!(flags & SYNC_WAIT) &&
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/*
	 * If the caller is waiting, return -EAGAIN to keep the background
	 * scanner moving and revisit the inode in a subsequent pass.
	 */
	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
		if (flags & SYNC_WAIT)
			ret = -EAGAIN;
		return ret;
	}
	ret = xfs_free_eofblocks(ip);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

static int
__xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb,
	int			(*execute)(struct xfs_inode *ip, int flags,
					   void *args),
	int			tag)
{
	int flags = SYNC_TRYLOCK;

	if (eofb && (eofb->eof_flags & XFS_EOF_FLAGS_SYNC))
		flags = SYNC_WAIT;

	return xfs_inode_ag_iterator_tag(mp, execute, flags,
					 eofb, tag);
}

int
xfs_icache_free_eofblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	/*
	 * Run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}

static void
__xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip,
	void		(*execute)(struct xfs_mount *mp),
	void		(*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				  int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;
	int tagged;

	/*
	 * Don't bother locking the AG and looking up in the radix trees
	 * if we already know that we have the tag set.
	 */
	if (ip->i_flags & XFS_IEOFBLOCKS)
		return;
	spin_lock(&ip->i_flags_lock);
	ip->i_flags |= XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!tagged) {
		/* propagate the eofblocks tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				   tag);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* kick off background trimming */
		execute(ip->i_mount);

		set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_set_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_eofblocks_tag(ip);
	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_eofblocks,
			trace_xfs_perag_set_eofblocks,
			XFS_ICI_EOFBLOCKS_TAG);
}

static void
__xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip,
	void		(*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
				    int error, unsigned long caller_ip),
	int		tag)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~XFS_IEOFBLOCKS;
	spin_unlock(&ip->i_flags_lock);

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	spin_lock(&pag->pag_ici_lock);

	radix_tree_tag_clear(&pag->pag_ici_root,
			     XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
	if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
		/* clear the eofblocks tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				     tag);
		spin_unlock(&ip->i_mount->m_perag_lock);
		clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
	}

	spin_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

void
xfs_inode_clear_eofblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_eofblocks_tag(ip);
	return __xfs_inode_clear_eofblocks_tag(ip,
			trace_xfs_perag_clear_eofblocks, XFS_ICI_EOFBLOCKS_TAG);
}

/*
 * Automatic CoW Reservation Freeing
 *
 * These functions automatically garbage collect leftover CoW reservations
 * that were made on behalf of a cowextsize hint when we start to run out
 * of quota or when the reservations sit around for too long.  If the file
 * has dirty pages or is undergoing writeback, its CoW reservations will
 * be retained.
 *
 * The actual garbage collection piggybacks off the same code that runs
 * the speculative EOF preallocation garbage collector.
 */
STATIC int
xfs_inode_free_cowblocks(
	struct xfs_inode	*ip,
	int			flags,
	void			*args)
{
	int ret;
	struct xfs_eofblocks *eofb = args;
	int match;
	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);

	/*
	 * Just clear the tag if we have an empty cow fork or none at all. It's
	 * possible the inode was fully unshared since it was originally tagged.
	 */
	if (!xfs_is_reflink_inode(ip) || !ifp->if_bytes) {
		trace_xfs_inode_free_cowblocks_invalid(ip);
		xfs_inode_clear_cowblocks_tag(ip);
		return 0;
	}

	/*
	 * If the mapping is dirty or under writeback we cannot touch the
	 * CoW fork.  Leave it alone if we're in the midst of a directio.
	 */
	if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
	    mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
	    atomic_read(&VFS_I(ip)->i_dio_count))
		return 0;

	if (eofb) {
		if (eofb->eof_flags & XFS_EOF_FLAGS_UNION)
			match = xfs_inode_match_id_union(ip, eofb);
		else
			match = xfs_inode_match_id(ip, eofb);
		if (!match)
			return 0;

		/* skip the inode if the file size is too small */
		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
			return 0;
	}

	/* Free the CoW blocks */
	xfs_ilock(ip, XFS_IOLOCK_EXCL);
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);

	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);

	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	xfs_iunlock(ip, XFS_IOLOCK_EXCL);

	return ret;
}

int
xfs_icache_free_cowblocks(
	struct xfs_mount	*mp,
	struct xfs_eofblocks	*eofb)
{
	return __xfs_icache_free_eofblocks(mp, eofb, xfs_inode_free_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

int
xfs_inode_free_quota_cowblocks(
	struct xfs_inode *ip)
{
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_cowblocks);
}

void
xfs_inode_set_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_set_cowblocks_tag(ip);
	return __xfs_inode_set_eofblocks_tag(ip, xfs_queue_cowblocks,
			trace_xfs_perag_set_cowblocks,
			XFS_ICI_COWBLOCKS_TAG);
}

void
xfs_inode_clear_cowblocks_tag(
	xfs_inode_t	*ip)
{
	trace_xfs_inode_clear_cowblocks_tag(ip);
	return __xfs_inode_clear_eofblocks_tag(ip,
			trace_xfs_perag_clear_cowblocks, XFS_ICI_COWBLOCKS_TAG);
}