// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bio.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/statfs.h>
#include <linux/seq_file.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/kernel.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"
#include "sys.h"
#include "xattr.h"
#include "lops.h"

enum dinode_demise {
	SHOULD_DELETE_DINODE,
	SHOULD_NOT_DELETE_DINODE,
	SHOULD_DEFER_EVICTION,
};

/**
 * gfs2_jindex_free - Clear all the journal index information
 * @sdp: The GFS2 superblock
 *
 */

void gfs2_jindex_free(struct gfs2_sbd *sdp)
{
	struct list_head list;
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	list_add(&list, &sdp->sd_jindex_list);
	list_del_init(&sdp->sd_jindex_list);
	sdp->sd_journals = 0;
	spin_unlock(&sdp->sd_jindex_spin);

	sdp->sd_jdesc = NULL;
	while (!list_empty(&list)) {
		jd = list_first_entry(&list, struct gfs2_jdesc, jd_list);
		gfs2_free_journal_extents(jd);
		list_del(&jd->jd_list);
		iput(jd->jd_inode);
		jd->jd_inode = NULL;
		kfree(jd);
	}
}

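/**
 * jdesc_find_i - Look up a journal descriptor by journal ID
 * @head: The journal descriptor list (sd_jindex_list) to search
 * @jid: The journal ID to look for
 *
 * Returns: the matching gfs2_jdesc, or NULL if no journal has this ID
 */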
static struct gfs2_jdesc *jdesc_find_i(struct list_head *head, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	list_for_each_entry(jd, head, jd_list) {
		if (jd->jd_jid == jid)
			return jd;
	}
	return NULL;
}

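/**
 * gfs2_jdesc_find - Find a journal descriptor, taking the jindex lock
 * @sdp: The GFS2 superblock
 * @jid: The journal ID to look for
 *
 * Returns: the matching gfs2_jdesc, or NULL if no journal has this ID
 */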
struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid)
{
	struct gfs2_jdesc *jd;

	spin_lock(&sdp->sd_jindex_spin);
	jd = jdesc_find_i(&sdp->sd_jindex_list, jid);
	spin_unlock(&sdp->sd_jindex_spin);

	return jd;
}

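/**
 * gfs2_jdesc_check - Sanity-check a journal and compute its size in blocks
 * @jd: The journal descriptor to check
 *
 * The journal must be between 8 MB and 1 GB in size, a whole number of
 * filesystem blocks, and fully allocated (no unmapped holes).
 *
 * Returns: 0 on success, -EIO if the journal is inconsistent
 */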
int gfs2_jdesc_check(struct gfs2_jdesc *jd)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	u64 size = i_size_read(jd->jd_inode);

	if (gfs2_check_internal_file_size(jd->jd_inode, 8 << 20, BIT(30)))
		return -EIO;

	jd->jd_blocks = size >> sdp->sd_sb.sb_bsize_shift;

	if (gfs2_write_alloc_required(ip, 0, size)) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	return 0;
}

/**
 * gfs2_make_fs_rw - Turn a Read-Only FS into a Read-Write one
 * @sdp: the filesystem
 *
 * Returns: errno
 */

int gfs2_make_fs_rw(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
	if (gfs2_withdrawn(sdp))
		return -EIO;

	error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
	if (error) {
		gfs2_consist(sdp);
		return error;
	}

	if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
		gfs2_consist(sdp);
		return -EIO;
	}

	/*  Initialize the head of the log  */
	sdp->sd_log_sequence = head.lh_sequence + 1;
	gfs2_log_pointers_init(sdp, head.lh_blkno);

	error = gfs2_quota_init(sdp);
	if (!error && gfs2_withdrawn(sdp))
		error = -EIO;
	if (!error)
		set_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
	return error;
}

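/*
 * gfs2_statfs_change_in and gfs2_statfs_change_out translate a statfs
 * change record between its big-endian on-disk layout (struct
 * gfs2_statfs_change) and the native in-core form (struct
 * gfs2_statfs_change_host).
 */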
void gfs2_statfs_change_in(struct gfs2_statfs_change_host *sc, const void *buf)
{
	const struct gfs2_statfs_change *str = buf;

	sc->sc_total = be64_to_cpu(str->sc_total);
	sc->sc_free = be64_to_cpu(str->sc_free);
	sc->sc_dinodes = be64_to_cpu(str->sc_dinodes);
}

void gfs2_statfs_change_out(const struct gfs2_statfs_change_host *sc, void *buf)
{
	struct gfs2_statfs_change *str = buf;

	str->sc_total = cpu_to_be64(sc->sc_total);
	str->sc_free = cpu_to_be64(sc->sc_free);
	str->sc_dinodes = cpu_to_be64(sc->sc_dinodes);
}

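/**
 * gfs2_statfs_init - Read the statfs counters at mount time
 * @sdp: The GFS2 superblock
 *
 * Reads the master statfs counters; on non-spectator mounts, also reads
 * this node's local statfs change file so later changes can be accumulated
 * against it.
 *
 * Returns: errno
 */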
int gfs2_statfs_init(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	struct gfs2_holder gh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		return error;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out;

	if (sdp->sd_args.ar_spectator) {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	} else {
		spin_lock(&sdp->sd_statfs_spin);
		gfs2_statfs_change_in(m_sc, m_bh->b_data +
				      sizeof(struct gfs2_dinode));
		gfs2_statfs_change_in(l_sc, sdp->sd_sc_bh->b_data +
				      sizeof(struct gfs2_dinode));
		spin_unlock(&sdp->sd_statfs_spin);
	}

	brelse(m_bh);
out:
	gfs2_glock_dq_uninit(&gh);
	return 0;
}

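/**
 * gfs2_statfs_change - Record an allocation change in the local statfs file
 * @sdp: The GFS2 superblock
 * @total: The change in the total block count
 * @free: The change in the free block count
 * @dinodes: The change in the dinode count
 *
 * Changes are accumulated in this node's local statfs change file and only
 * folded back into the master file by gfs2_statfs_sync(), so nodes need not
 * contend on the master inode for every allocation.  With the
 * statfs_percent mount option set, a sync is requested once the local
 * free-space delta reaches that percentage of the master free count.  For
 * example (numbers made up for illustration): with statfs_percent=1 and
 * m_sc->sc_free == 100000, a local delta of |l_sc->sc_free| >= 1000 blocks
 * triggers gfs2_wake_up_statfs().
 */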
void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
			s64 dinodes)
{
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	s64 x, y;
	int need_sync = 0;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);

	spin_lock(&sdp->sd_statfs_spin);
	l_sc->sc_total += total;
	l_sc->sc_free += free;
	l_sc->sc_dinodes += dinodes;
	gfs2_statfs_change_out(l_sc, sdp->sd_sc_bh->b_data +
			       sizeof(struct gfs2_dinode));
	if (sdp->sd_args.ar_statfs_percent) {
		x = 100 * l_sc->sc_free;
		y = m_sc->sc_free * sdp->sd_args.ar_statfs_percent;
		if (x >= y || x <= -y)
			need_sync = 1;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	if (need_sync)
		gfs2_wake_up_statfs(sdp);
}

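/**
 * update_statfs - Fold the local statfs changes into the master statfs file
 * @sdp: The GFS2 superblock
 * @m_bh: The buffer holding the master statfs dinode
 *
 * Adds both buffers to the current transaction, applies the local counters
 * to the master copy, and zeroes the local counters.  Called under an
 * active transaction (see gfs2_statfs_sync()).
 */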
void update_statfs(struct gfs2_sbd *sdp, struct buffer_head *m_bh)
{
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_inode *l_ip = GFS2_I(sdp->sd_sc_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;

	gfs2_trans_add_meta(l_ip->i_gl, sdp->sd_sc_bh);
	gfs2_trans_add_meta(m_ip->i_gl, m_bh);

	spin_lock(&sdp->sd_statfs_spin);
	m_sc->sc_total += l_sc->sc_total;
	m_sc->sc_free += l_sc->sc_free;
	m_sc->sc_dinodes += l_sc->sc_dinodes;
	memset(l_sc, 0, sizeof(struct gfs2_statfs_change));
	memset(sdp->sd_sc_bh->b_data + sizeof(struct gfs2_dinode),
	       0, sizeof(struct gfs2_statfs_change));
	gfs2_statfs_change_out(m_sc, m_bh->b_data + sizeof(struct gfs2_dinode));
	spin_unlock(&sdp->sd_statfs_spin);
}

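/**
 * gfs2_statfs_sync - Sync the local statfs changes back to the master file
 * @sb: The VFS superblock
 * @type: Unused by this function
 *
 * If the local counters are all zero there is nothing to do; otherwise the
 * local changes are folded into the master file in a small transaction.
 *
 * Returns: errno
 */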
int gfs2_statfs_sync(struct super_block *sb, int type)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct gfs2_holder gh;
	struct buffer_head *m_bh;
	int error;

	error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, GL_NOCACHE,
				   &gh);
	if (error)
		goto out;

	error = gfs2_meta_inode_buffer(m_ip, &m_bh);
	if (error)
		goto out_unlock;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (!l_sc->sc_total && !l_sc->sc_free && !l_sc->sc_dinodes) {
		spin_unlock(&sdp->sd_statfs_spin);
		goto out_bh;
	}
	spin_unlock(&sdp->sd_statfs_spin);

	error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0);
	if (error)
		goto out_bh;

	update_statfs(sdp, m_bh);
	sdp->sd_statfs_force_sync = 0;

	gfs2_trans_end(sdp);

out_bh:
	brelse(m_bh);
out_unlock:
	gfs2_glock_dq_uninit(&gh);
out:
	return error;
}

struct lfcc {
	struct list_head list;
	struct gfs2_holder gh;
};

/**
 * gfs2_lock_fs_check_clean - Stop all writes to the FS and check that all
 *                            journals are clean
 * @sdp: the file system
 *
 * Returns: errno
 */

static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp)
{
	struct gfs2_inode *ip;
	struct gfs2_jdesc *jd;
	struct lfcc *lfcc;
	LIST_HEAD(list);
	struct gfs2_log_header_host lh;
	int error;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		lfcc = kmalloc(sizeof(struct lfcc), GFP_KERNEL);
		if (!lfcc) {
			error = -ENOMEM;
			goto out;
		}
		ip = GFS2_I(jd->jd_inode);
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &lfcc->gh);
		if (error) {
			kfree(lfcc);
			goto out;
		}
		list_add(&lfcc->list, &list);
	}

	error = gfs2_glock_nq_init(sdp->sd_freeze_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NOEXP | GL_NOPID,
				   &sdp->sd_freeze_gh);
	if (error)
		goto out;

	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		error = gfs2_jdesc_check(jd);
		if (error)
			break;
		error = gfs2_find_jhead(jd, &lh, false);
		if (error)
			break;
		if (!(lh.lh_flags & GFS2_LOG_HEAD_UNMOUNT)) {
			error = -EBUSY;
			break;
		}
	}

	if (error)
		gfs2_freeze_unlock(&sdp->sd_freeze_gh);

out:
	while (!list_empty(&list)) {
		lfcc = list_first_entry(&list, struct lfcc, list);
		list_del(&lfcc->list);
		gfs2_glock_dq_uninit(&lfcc->gh);
		kfree(lfcc);
	}
	return error;
}

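/**
 * gfs2_dinode_out - Write an in-core inode into its on-disk dinode form
 * @ip: The GFS2 inode
 * @buf: The buffer to fill in (a big-endian struct gfs2_dinode)
 */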
void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf)
{
	const struct inode *inode = &ip->i_inode;
	struct gfs2_dinode *str = buf;

	str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI);
	str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI);
	str->di_num.no_addr = cpu_to_be64(ip->i_no_addr);
	str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino);
	str->di_mode = cpu_to_be32(inode->i_mode);
	str->di_uid = cpu_to_be32(i_uid_read(inode));
	str->di_gid = cpu_to_be32(i_gid_read(inode));
	str->di_nlink = cpu_to_be32(inode->i_nlink);
	str->di_size = cpu_to_be64(i_size_read(inode));
	str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(inode));
	str->di_atime = cpu_to_be64(inode->i_atime.tv_sec);
	str->di_mtime = cpu_to_be64(inode->i_mtime.tv_sec);
	str->di_ctime = cpu_to_be64(inode->i_ctime.tv_sec);

	str->di_goal_meta = cpu_to_be64(ip->i_goal);
	str->di_goal_data = cpu_to_be64(ip->i_goal);
	str->di_generation = cpu_to_be64(ip->i_generation);

	str->di_flags = cpu_to_be32(ip->i_diskflags);
	str->di_height = cpu_to_be16(ip->i_height);
	str->di_payload_format = cpu_to_be32(S_ISDIR(inode->i_mode) &&
					     !(ip->i_diskflags & GFS2_DIF_EXHASH) ?
					     GFS2_FORMAT_DE : 0);
	str->di_depth = cpu_to_be16(ip->i_depth);
	str->di_entries = cpu_to_be32(ip->i_entries);

	str->di_eattr = cpu_to_be64(ip->i_eattr);
	str->di_atime_nsec = cpu_to_be32(inode->i_atime.tv_nsec);
	str->di_mtime_nsec = cpu_to_be32(inode->i_mtime.tv_nsec);
	str->di_ctime_nsec = cpu_to_be32(inode->i_ctime.tv_nsec);
}

/**
 * gfs2_write_inode - Make sure the inode is stable on the disk
 * @inode: The inode
 * @wbc: The writeback control structure
 *
 * Returns: errno
 */

static int gfs2_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct address_space *metamapping = gfs2_glock2aspace(ip->i_gl);
	struct backing_dev_info *bdi = inode_to_bdi(metamapping->host);
	int ret = 0;
	bool flush_all = (wbc->sync_mode == WB_SYNC_ALL || gfs2_is_jdata(ip));

	if (flush_all)
		gfs2_log_flush(GFS2_SB(inode), ip->i_gl,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_WRITE_INODE);
	if (bdi->wb.dirty_exceeded)
		gfs2_ail1_flush(sdp, wbc);
	else
		filemap_fdatawrite(metamapping);
	if (flush_all)
		ret = filemap_fdatawait(metamapping);
	if (ret)
		mark_inode_dirty_sync(inode);
	else {
		spin_lock(&inode->i_lock);
		if (!(inode->i_flags & I_DIRTY))
			gfs2_ordered_del_inode(ip);
		spin_unlock(&inode->i_lock);
	}
	return ret;
}

/**
 * gfs2_dirty_inode - check for atime updates
 * @inode: The inode in question
 * @flags: The type of dirty
 *
 * Unfortunately it can be called under any combination of inode
 * glock and transaction lock, so we have to check carefully.
 *
 * At the moment this deals only with atime - it should be possible
 * to expand that role in future, once a review of the locking has
 * been carried out.
 */

static void gfs2_dirty_inode(struct inode *inode, int flags)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int need_unlock = 0;
	int need_endtrans = 0;
	int ret;

	if (unlikely(gfs2_withdrawn(sdp)))
		return;
	if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
		if (ret) {
			fs_err(sdp, "dirty_inode: glock %d\n", ret);
			gfs2_dump_glock(NULL, ip->i_gl, true);
			return;
		}
		need_unlock = 1;
	} else if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
		return;

	if (current->journal_info == NULL) {
		ret = gfs2_trans_begin(sdp, RES_DINODE, 0);
		if (ret) {
			fs_err(sdp, "dirty_inode: gfs2_trans_begin %d\n", ret);
			goto out;
		}
		need_endtrans = 1;
	}

	ret = gfs2_meta_inode_buffer(ip, &bh);
	if (ret == 0) {
		gfs2_trans_add_meta(ip->i_gl, bh);
		gfs2_dinode_out(ip, bh->b_data);
		brelse(bh);
	}

	if (need_endtrans)
		gfs2_trans_end(sdp);
out:
	if (need_unlock)
		gfs2_glock_dq_uninit(&gh);
}

/**
 * gfs2_make_fs_ro - Turn a Read-Write FS into a Read-Only one
 * @sdp: the filesystem
 *
 */

void gfs2_make_fs_ro(struct gfs2_sbd *sdp)
{
	int log_write_allowed = test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);

	gfs2_flush_delete_work(sdp);
	if (!log_write_allowed && current == sdp->sd_quotad_process)
		fs_warn(sdp, "The quotad daemon is withdrawing.\n");
	else if (sdp->sd_quotad_process)
		kthread_stop(sdp->sd_quotad_process);
	sdp->sd_quotad_process = NULL;

	if (!log_write_allowed && current == sdp->sd_logd_process)
		fs_warn(sdp, "The logd daemon is withdrawing.\n");
	else if (sdp->sd_logd_process)
		kthread_stop(sdp->sd_logd_process);
	sdp->sd_logd_process = NULL;

	if (log_write_allowed) {
		gfs2_quota_sync(sdp->sd_vfs, 0);
		gfs2_statfs_sync(sdp->sd_vfs, 0);

		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_SHUTDOWN |
			       GFS2_LFC_MAKE_FS_RO);
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
		gfs2_assert_warn(sdp, gfs2_log_is_empty(sdp));
	} else {
		wait_event_timeout(sdp->sd_log_waitq,
				   gfs2_log_is_empty(sdp),
				   HZ * 5);
	}
	gfs2_quota_cleanup(sdp);

	if (!log_write_allowed)
		sdp->sd_vfs->s_flags |= SB_RDONLY;
}

/**
 * gfs2_put_super - Unmount the filesystem
 * @sb: The VFS superblock
 *
 */

static void gfs2_put_super(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_jdesc *jd;

	/* No more recovery requests */
	set_bit(SDF_NORECOVERY, &sdp->sd_flags);
	smp_mb();

	/* Wait on outstanding recovery */
restart:
	spin_lock(&sdp->sd_jindex_spin);
	list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
		if (!test_bit(JDF_RECOVERY, &jd->jd_flags))
			continue;
		spin_unlock(&sdp->sd_jindex_spin);
		wait_on_bit(&jd->jd_flags, JDF_RECOVERY,
			    TASK_UNINTERRUPTIBLE);
		goto restart;
	}
	spin_unlock(&sdp->sd_jindex_spin);

	if (!sb_rdonly(sb)) {
		gfs2_make_fs_ro(sdp);
	}
	WARN_ON(gfs2_withdrawing(sdp));

	/*  At this point, we're through modifying the disk  */

	/*  Release stuff  */

	iput(sdp->sd_jindex);
	iput(sdp->sd_statfs_inode);
	iput(sdp->sd_rindex);
	iput(sdp->sd_quota_inode);

	gfs2_glock_put(sdp->sd_rename_gl);
	gfs2_glock_put(sdp->sd_freeze_gl);

	if (!sdp->sd_args.ar_spectator) {
		if (gfs2_holder_initialized(&sdp->sd_journal_gh))
			gfs2_glock_dq_uninit(&sdp->sd_journal_gh);
		if (gfs2_holder_initialized(&sdp->sd_jinode_gh))
			gfs2_glock_dq_uninit(&sdp->sd_jinode_gh);
		brelse(sdp->sd_sc_bh);
		gfs2_glock_dq_uninit(&sdp->sd_sc_gh);
		gfs2_glock_dq_uninit(&sdp->sd_qc_gh);
		free_local_statfs_inodes(sdp);
		iput(sdp->sd_qc_inode);
	}

	gfs2_glock_dq_uninit(&sdp->sd_live_gh);
	gfs2_clear_rgrpd(sdp);
	gfs2_jindex_free(sdp);
	/*  Take apart glock structures and buffer lists  */
	gfs2_gl_hash_clear(sdp);
	truncate_inode_pages_final(&sdp->sd_aspace);
	gfs2_delete_debugfs_file(sdp);
	/*  Unmount the locking protocol  */
	gfs2_lm_unmount(sdp);

	/*  At this point, we're through participating in the lockspace  */
	gfs2_sys_fs_del(sdp);
	free_sbd(sdp);
}

/**
 * gfs2_sync_fs - sync the filesystem
 * @sb: the superblock
 * @wait: true to wait for completion
 *
 * Flushes the log to disk.
 */

static int gfs2_sync_fs(struct super_block *sb, int wait)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;

	gfs2_quota_sync(sb, -1);
	if (wait)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_SYNC_FS);
	return sdp->sd_log_error;
}

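/**
 * gfs2_freeze_func - Work function that thaws the filesystem
 * @work: The sd_freeze_work work item embedded in struct gfs2_sbd
 *
 * Reacquires the freeze glock, marks the filesystem unfrozen, and thaws
 * the VFS superblock, withdrawing if either step fails.  Clears and wakes
 * SDF_FS_FROZEN when done.
 */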
void gfs2_freeze_func(struct work_struct *work)
{
	int error;
	struct gfs2_holder freeze_gh;
	struct gfs2_sbd *sdp = container_of(work, struct gfs2_sbd, sd_freeze_work);
	struct super_block *sb = sdp->sd_vfs;

	atomic_inc(&sb->s_active);
	error = gfs2_freeze_lock(sdp, &freeze_gh, 0);
	if (error) {
		gfs2_assert_withdraw(sdp, 0);
	} else {
		atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
		error = thaw_super(sb);
		if (error) {
			fs_info(sdp, "GFS2: couldn't thaw filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		gfs2_freeze_unlock(&freeze_gh);
	}
	deactivate_super(sb);
	clear_bit_unlock(SDF_FS_FROZEN, &sdp->sd_flags);
	wake_up_bit(&sdp->sd_flags, SDF_FS_FROZEN);
}

/**
 * gfs2_freeze - prevent further writes to the filesystem
 * @sb: the VFS structure for the filesystem
 *
 */

static int gfs2_freeze(struct super_block *sb)
{
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int error;

	mutex_lock(&sdp->sd_freeze_mutex);
	if (atomic_read(&sdp->sd_freeze_state) != SFS_UNFROZEN) {
		error = -EBUSY;
		goto out;
	}

	for (;;) {
		if (gfs2_withdrawn(sdp)) {
			error = -EINVAL;
			goto out;
		}

		error = gfs2_lock_fs_check_clean(sdp);
		if (!error)
			break;

		if (error == -EBUSY)
			fs_err(sdp, "waiting for recovery before freeze\n");
		else if (error == -EIO) {
			fs_err(sdp, "Fatal IO error: cannot freeze gfs2 due "
			       "to recovery error.\n");
			goto out;
		} else {
			fs_err(sdp, "error freezing FS: %d\n", error);
		}
		fs_err(sdp, "retrying...\n");
		msleep(1000);
	}
	set_bit(SDF_FS_FROZEN, &sdp->sd_flags);
out:
	mutex_unlock(&sdp->sd_freeze_mutex);
	return error;
}

726 
727 /**
728  * gfs2_unfreeze - reallow writes to the filesystem
729  * @sb: the VFS structure for the filesystem
730  *
731  */
732 
gfs2_unfreeze(struct super_block * sb)733 static int gfs2_unfreeze(struct super_block *sb)
734 {
735 	struct gfs2_sbd *sdp = sb->s_fs_info;
736 
737 	mutex_lock(&sdp->sd_freeze_mutex);
738 	if (atomic_read(&sdp->sd_freeze_state) != SFS_FROZEN ||
739 	    !gfs2_holder_initialized(&sdp->sd_freeze_gh)) {
740 		mutex_unlock(&sdp->sd_freeze_mutex);
741 		return -EINVAL;
742 	}
743 
744 	gfs2_freeze_unlock(&sdp->sd_freeze_gh);
745 	mutex_unlock(&sdp->sd_freeze_mutex);
746 	return wait_on_bit(&sdp->sd_flags, SDF_FS_FROZEN, TASK_INTERRUPTIBLE);
747 }
748 
/**
 * statfs_slow_fill - accumulate one resource group's counts into the sc
 * @rgd: the RG
 * @sc: the sc structure
 *
 * Returns: 0 (this function always succeeds)
 */

static int statfs_slow_fill(struct gfs2_rgrpd *rgd,
			    struct gfs2_statfs_change_host *sc)
{
	gfs2_rgrp_verify(rgd);
	sc->sc_total += rgd->rd_data;
	sc->sc_free += rgd->rd_free;
	sc->sc_dinodes += rgd->rd_dinodes;
	return 0;
}

/**
 * gfs2_statfs_slow - Stat a filesystem using asynchronous locking
 * @sdp: the filesystem
 * @sc: the sc info that will be returned
 *
 * Any error (other than a signal) will cause this routine to fall back
 * to the synchronous version.
 *
 * FIXME: This really shouldn't busy wait like this.
 *
 * Returns: errno
 */

static int gfs2_statfs_slow(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
{
	struct gfs2_rgrpd *rgd_next;
	struct gfs2_holder *gha, *gh;
	unsigned int slots = 64;
	unsigned int x;
	int done;
	int error = 0, err;

	memset(sc, 0, sizeof(struct gfs2_statfs_change_host));
	gha = kmalloc_array(slots, sizeof(struct gfs2_holder), GFP_KERNEL);
	if (!gha)
		return -ENOMEM;
	for (x = 0; x < slots; x++)
		gfs2_holder_mark_uninitialized(gha + x);

	rgd_next = gfs2_rgrpd_get_first(sdp);

	for (;;) {
		done = 1;

		for (x = 0; x < slots; x++) {
			gh = gha + x;

			if (gfs2_holder_initialized(gh) && gfs2_glock_poll(gh)) {
				err = gfs2_glock_wait(gh);
				if (err) {
					gfs2_holder_uninit(gh);
					error = err;
				} else {
					if (!error) {
						struct gfs2_rgrpd *rgd =
							gfs2_glock2rgrp(gh->gh_gl);

						error = statfs_slow_fill(rgd, sc);
					}
					gfs2_glock_dq_uninit(gh);
				}
			}

			if (gfs2_holder_initialized(gh))
				done = 0;
			else if (rgd_next && !error) {
				error = gfs2_glock_nq_init(rgd_next->rd_gl,
							   LM_ST_SHARED,
							   GL_ASYNC,
							   gh);
				rgd_next = gfs2_rgrpd_get_next(rgd_next);
				done = 0;
			}

			if (signal_pending(current))
				error = -ERESTARTSYS;
		}

		if (done)
			break;

		yield();
	}

	kfree(gha);
	return error;
}

844 
845 /**
846  * gfs2_statfs_i - Do a statfs
847  * @sdp: the filesystem
848  * @sc: the sc structure
849  *
850  * Returns: errno
851  */
852 
gfs2_statfs_i(struct gfs2_sbd * sdp,struct gfs2_statfs_change_host * sc)853 static int gfs2_statfs_i(struct gfs2_sbd *sdp, struct gfs2_statfs_change_host *sc)
854 {
855 	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
856 	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
857 
858 	spin_lock(&sdp->sd_statfs_spin);
859 
860 	*sc = *m_sc;
861 	sc->sc_total += l_sc->sc_total;
862 	sc->sc_free += l_sc->sc_free;
863 	sc->sc_dinodes += l_sc->sc_dinodes;
864 
865 	spin_unlock(&sdp->sd_statfs_spin);
866 
867 	if (sc->sc_free < 0)
868 		sc->sc_free = 0;
869 	if (sc->sc_free > sc->sc_total)
870 		sc->sc_free = sc->sc_total;
871 	if (sc->sc_dinodes < 0)
872 		sc->sc_dinodes = 0;
873 
874 	return 0;
875 }
876 
877 /**
878  * gfs2_statfs - Gather and return stats about the filesystem
879  * @dentry: The name of the link
880  * @buf: The buffer
881  *
882  * Returns: 0 on success or error code
883  */
884 
gfs2_statfs(struct dentry * dentry,struct kstatfs * buf)885 static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
886 {
887 	struct super_block *sb = dentry->d_sb;
888 	struct gfs2_sbd *sdp = sb->s_fs_info;
889 	struct gfs2_statfs_change_host sc;
890 	int error;
891 
892 	error = gfs2_rindex_update(sdp);
893 	if (error)
894 		return error;
895 
896 	if (gfs2_tune_get(sdp, gt_statfs_slow))
897 		error = gfs2_statfs_slow(sdp, &sc);
898 	else
899 		error = gfs2_statfs_i(sdp, &sc);
900 
901 	if (error)
902 		return error;
903 
904 	buf->f_type = GFS2_MAGIC;
905 	buf->f_bsize = sdp->sd_sb.sb_bsize;
906 	buf->f_blocks = sc.sc_total;
907 	buf->f_bfree = sc.sc_free;
908 	buf->f_bavail = sc.sc_free;
909 	buf->f_files = sc.sc_dinodes + sc.sc_free;
910 	buf->f_ffree = sc.sc_free;
911 	buf->f_namelen = GFS2_FNAMESIZE;
912 
913 	return 0;
914 }
915 
/**
 * gfs2_drop_inode - Drop an inode (test for remote unlink)
 * @inode: The inode to drop
 *
 * If we've received a callback on an iopen lock then it's because a
 * remote node tried to deallocate the inode but failed due to this node
 * still having the inode open. Here we mark the link count zero
 * since we know that it must have reached zero if the GLF_DEMOTE flag
 * is set on the iopen glock. If we didn't do a disk read since the
 * remote node removed the final link then we might otherwise miss
 * this event. This check ensures that this node will deallocate the
 * inode's blocks, or alternatively pass the baton on to another
 * node for later deallocation.
 */

static int gfs2_drop_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	if (!test_bit(GIF_FREE_VFS_INODE, &ip->i_flags) &&
	    inode->i_nlink &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
		if (test_bit(GLF_DEMOTE, &gl->gl_flags))
			clear_nlink(inode);
	}

	/*
	 * When an inode's link count has dropped to zero under memory
	 * pressure, defer deleting the inode to the delete workqueue.  This
	 * avoids calling into DLM under memory pressure, which can deadlock.
	 */
	if (!inode->i_nlink &&
	    unlikely(current->flags & PF_MEMALLOC) &&
	    gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		gfs2_glock_hold(gl);
		if (!gfs2_queue_delete_work(gl, 0))
			gfs2_glock_queue_put(gl);
		return 0;
	}

	return generic_drop_inode(inode);
}

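/*
 * is_ancestor - Test whether @d2 is @d1 itself or an ancestor of @d1
 */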
static int is_ancestor(const struct dentry *d1, const struct dentry *d2)
{
	do {
		if (d1 == d2)
			return 1;
		d1 = d1->d_parent;
	} while (!IS_ROOT(d1));
	return 0;
}

/**
 * gfs2_show_options - Show mount options for /proc/mounts
 * @s: seq_file structure
 * @root: root of this (sub)tree
 *
 * Returns: 0 on success or error code
 */

static int gfs2_show_options(struct seq_file *s, struct dentry *root)
{
	struct gfs2_sbd *sdp = root->d_sb->s_fs_info;
	struct gfs2_args *args = &sdp->sd_args;
	unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum;

	spin_lock(&sdp->sd_tune.gt_spin);
	logd_secs = sdp->sd_tune.gt_logd_secs;
	quota_quantum = sdp->sd_tune.gt_quota_quantum;
	statfs_quantum = sdp->sd_tune.gt_statfs_quantum;
	statfs_slow = sdp->sd_tune.gt_statfs_slow;
	spin_unlock(&sdp->sd_tune.gt_spin);

	if (is_ancestor(root, sdp->sd_master_dir))
		seq_puts(s, ",meta");
	if (args->ar_lockproto[0])
		seq_show_option(s, "lockproto", args->ar_lockproto);
	if (args->ar_locktable[0])
		seq_show_option(s, "locktable", args->ar_locktable);
	if (args->ar_hostdata[0])
		seq_show_option(s, "hostdata", args->ar_hostdata);
	if (args->ar_spectator)
		seq_puts(s, ",spectator");
	if (args->ar_localflocks)
		seq_puts(s, ",localflocks");
	if (args->ar_debug)
		seq_puts(s, ",debug");
	if (args->ar_posix_acl)
		seq_puts(s, ",acl");
	if (args->ar_quota != GFS2_QUOTA_DEFAULT) {
		char *state;
		switch (args->ar_quota) {
		case GFS2_QUOTA_OFF:
			state = "off";
			break;
		case GFS2_QUOTA_ACCOUNT:
			state = "account";
			break;
		case GFS2_QUOTA_ON:
			state = "on";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",quota=%s", state);
	}
	if (args->ar_suiddir)
		seq_puts(s, ",suiddir");
	if (args->ar_data != GFS2_DATA_DEFAULT) {
		char *state;
		switch (args->ar_data) {
		case GFS2_DATA_WRITEBACK:
			state = "writeback";
			break;
		case GFS2_DATA_ORDERED:
			state = "ordered";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",data=%s", state);
	}
	if (args->ar_discard)
		seq_puts(s, ",discard");
	if (logd_secs != 30)
		seq_printf(s, ",commit=%d", logd_secs);
	if (statfs_quantum != 30)
		seq_printf(s, ",statfs_quantum=%d", statfs_quantum);
	else if (statfs_slow)
		seq_puts(s, ",statfs_quantum=0");
	if (quota_quantum != 60)
		seq_printf(s, ",quota_quantum=%d", quota_quantum);
	if (args->ar_statfs_percent)
		seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent);
	if (args->ar_errors != GFS2_ERRORS_DEFAULT) {
		const char *state;

		switch (args->ar_errors) {
		case GFS2_ERRORS_WITHDRAW:
			state = "withdraw";
			break;
		case GFS2_ERRORS_PANIC:
			state = "panic";
			break;
		default:
			state = "unknown";
			break;
		}
		seq_printf(s, ",errors=%s", state);
	}
	if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags))
		seq_puts(s, ",nobarrier");
	if (test_bit(SDF_DEMOTE, &sdp->sd_flags))
		seq_puts(s, ",demote_interface_used");
	if (args->ar_rgrplvb)
		seq_puts(s, ",rgrplvb");
	if (args->ar_loccookie)
		seq_puts(s, ",loccookie");
	return 0;
}

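/*
 * gfs2_final_release_pages - Truncate all cached pages of a deallocated inode
 * @ip: The GFS2 inode
 *
 * Drops both the inode's data pages and the metadata pages cached in the
 * glock's address space, then clears the glock's dirty and log-flush bits
 * if no revokes remain outstanding.
 */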
static void gfs2_final_release_pages(struct gfs2_inode *ip)
{
	struct inode *inode = &ip->i_inode;
	struct gfs2_glock *gl = ip->i_gl;

	truncate_inode_pages(gfs2_glock2aspace(ip->i_gl), 0);
	truncate_inode_pages(&inode->i_data, 0);

	if (atomic_read(&gl->gl_revokes) == 0) {
		clear_bit(GLF_LFLUSH, &gl->gl_flags);
		clear_bit(GLF_DIRTY, &gl->gl_flags);
	}
}

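/**
 * gfs2_dinode_dealloc - Free the block holding an inode's dinode
 * @ip: The GFS2 inode to deallocate
 *
 * The dinode must be the inode's last remaining allocated block; its
 * resource group is held exclusively for the duration of the free.
 *
 * Returns: errno
 */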
static int gfs2_dinode_dealloc(struct gfs2_inode *ip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_holder gh;
	int error;

	if (gfs2_get_inode_blocks(&ip->i_inode) != 1) {
		gfs2_consist_inode(ip);
		return -EIO;
	}

	error = gfs2_rindex_update(sdp);
	if (error)
		return error;

	error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
	if (error)
		return error;

	rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr, 1);
	if (!rgd) {
		gfs2_consist_inode(ip);
		error = -EIO;
		goto out_qs;
	}

	error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE,
				   LM_FLAG_NODE_SCOPE, &gh);
	if (error)
		goto out_qs;

	error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA,
				 sdp->sd_jdesc->jd_blocks);
	if (error)
		goto out_rg_gunlock;

	gfs2_free_di(rgd, ip);

	gfs2_final_release_pages(ip);

	gfs2_trans_end(sdp);

out_rg_gunlock:
	gfs2_glock_dq_uninit(&gh);
out_qs:
	gfs2_quota_unhold(ip);
	return error;
}

/**
 * gfs2_glock_put_eventually
 * @gl:	The glock to put
 *
 * When under memory pressure, trigger a deferred glock put to make sure we
 * won't call into DLM and deadlock.  Otherwise, put the glock directly.
 */

static void gfs2_glock_put_eventually(struct gfs2_glock *gl)
{
	if (current->flags & PF_MEMALLOC)
		gfs2_glock_queue_put(gl);
	else
		gfs2_glock_put(gl);
}

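/**
 * gfs2_upgrade_iopen_glock - Try to upgrade the iopen glock to exclusive
 * @inode: The inode whose iopen glock should be upgraded
 *
 * Drops the shared hold on the iopen glock and tries to retake it
 * exclusively, first with LM_FLAG_TRY_1CB and then asynchronously with a
 * timeout, as described in the comment in the function body.
 *
 * Returns: true if the exclusive lock was acquired, false otherwise
 */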
static bool gfs2_upgrade_iopen_glock(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_holder *gh = &ip->i_iopen_gh;
	long timeout = 5 * HZ;
	int error;

	gh->gh_flags |= GL_NOCACHE;
	gfs2_glock_dq_wait(gh);

	/*
	 * If there are no other lock holders, we'll get the lock immediately.
	 * Otherwise, the other nodes holding the lock will be notified about
	 * our locking request.  If they don't have the inode open, they'll
	 * evict the cached inode and release the lock.  Otherwise, if they
	 * poke the inode glock, we'll take this as an indication that they
	 * still need the iopen glock and that they'll take care of deleting
	 * the inode when they're done.  As a last resort, if another node
	 * keeps holding the iopen glock without showing any activity on the
	 * inode glock, we'll eventually time out.
	 *
	 * Note that we're passing the LM_FLAG_TRY_1CB flag to the first
	 * locking request as an optimization to notify lock holders as soon as
	 * possible.  Without that flag, they'd be notified implicitly by the
	 * second locking request.
	 */

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, LM_FLAG_TRY_1CB | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error != GLR_TRYFAILED)
		return !error;

	gfs2_holder_reinit(LM_ST_EXCLUSIVE, GL_ASYNC | GL_NOCACHE, gh);
	error = gfs2_glock_nq(gh);
	if (error)
		return false;

	timeout = wait_event_interruptible_timeout(sdp->sd_async_glock_wait,
		!test_bit(HIF_WAIT, &gh->gh_iflags) ||
		test_bit(GLF_DEMOTE, &ip->i_gl->gl_flags),
		timeout);
	if (!test_bit(HIF_HOLDER, &gh->gh_iflags)) {
		gfs2_glock_dq(gh);
		return false;
	}
	return gfs2_glock_holder_ready(gh) == 0;
}

/**
 * evict_should_delete - determine whether the inode is eligible for deletion
 * @inode: The inode to evict
 * @gh: The glock holder structure
 *
 * This function determines whether the evicted inode is eligible to be deleted
 * and locks the inode glock.
 *
 * Returns: the fate of the dinode
 */
static enum dinode_demise evict_should_delete(struct inode *inode,
					      struct gfs2_holder *gh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	int ret;

	if (test_bit(GIF_ALLOC_FAILED, &ip->i_flags)) {
		BUG_ON(!gfs2_glock_is_locked_by_me(ip->i_gl));
		goto should_delete;
	}

	if (test_bit(GIF_DEFERRED_DELETE, &ip->i_flags))
		return SHOULD_DEFER_EVICTION;

	/* Deletes should never happen under memory pressure anymore.  */
	if (WARN_ON_ONCE(current->flags & PF_MEMALLOC))
		return SHOULD_DEFER_EVICTION;

	/* Must not read inode block until block type has been verified */
	ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, gh);
	if (unlikely(ret)) {
		glock_clear_object(ip->i_iopen_gh.gh_gl, ip);
		ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
		gfs2_glock_dq_uninit(&ip->i_iopen_gh);
		return SHOULD_DEFER_EVICTION;
	}

	if (gfs2_inode_already_deleted(ip->i_gl, ip->i_no_formal_ino))
		return SHOULD_NOT_DELETE_DINODE;
	ret = gfs2_check_blk_type(sdp, ip->i_no_addr, GFS2_BLKST_UNLINKED);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	ret = gfs2_instantiate(gh);
	if (ret)
		return SHOULD_NOT_DELETE_DINODE;

	/*
	 * The inode may have been recreated in the meantime.
	 */
	if (inode->i_nlink)
		return SHOULD_NOT_DELETE_DINODE;

should_delete:
	if (gfs2_holder_initialized(&ip->i_iopen_gh) &&
	    test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
		if (!gfs2_upgrade_iopen_glock(inode)) {
			gfs2_holder_uninit(&ip->i_iopen_gh);
			return SHOULD_NOT_DELETE_DINODE;
		}
	}
	return SHOULD_DELETE_DINODE;
}

/**
 * evict_unlinked_inode - delete the pieces of an unlinked evicted inode
 * @inode: The inode to evict
 */
static int evict_unlinked_inode(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret;

	if (S_ISDIR(inode->i_mode) &&
	    (ip->i_diskflags & GFS2_DIF_EXHASH)) {
		ret = gfs2_dir_exhash_dealloc(ip);
		if (ret)
			goto out;
	}

	if (ip->i_eattr) {
		ret = gfs2_ea_dealloc(ip);
		if (ret)
			goto out;
	}

	if (!gfs2_is_stuffed(ip)) {
		ret = gfs2_file_dealloc(ip);
		if (ret)
			goto out;
	}

	/* We're about to clear the bitmap for the dinode, but as soon as we
	   do, gfs2_create_inode can create another inode at the same block
	   location and try to set gl_object again. We clear gl_object here so
	   that subsequent inode creates don't see an old gl_object. */
	glock_clear_object(ip->i_gl, ip);
	ret = gfs2_dinode_dealloc(ip);
	gfs2_inode_remember_delete(ip->i_gl, ip->i_no_formal_ino);
out:
	return ret;
}

/*
 * evict_linked_inode - evict an inode whose dinode has not been unlinked
 * @inode: The inode to evict
 */
static int evict_linked_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct address_space *metamapping;
	int ret;

	gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_EVICT_INODE);
	metamapping = gfs2_glock2aspace(ip->i_gl);
	if (test_bit(GLF_DIRTY, &ip->i_gl->gl_flags)) {
		filemap_fdatawrite(metamapping);
		filemap_fdatawait(metamapping);
	}
	write_inode_now(inode, 1);
	gfs2_ail_flush(ip->i_gl, 0);

	ret = gfs2_trans_begin(sdp, 0, sdp->sd_jdesc->jd_blocks);
	if (ret)
		return ret;

	/* Needs to be done before glock release & also in a transaction */
	truncate_inode_pages(&inode->i_data, 0);
	truncate_inode_pages(metamapping, 0);
	gfs2_trans_end(sdp);
	return 0;
}

/**
 * gfs2_evict_inode - Remove an inode from cache
 * @inode: The inode to evict
 *
 * There are three cases to consider:
 * 1. i_nlink == 0, we are the final opener (and must deallocate)
 * 2. i_nlink == 0, we are not the final opener (and cannot deallocate)
 * 3. i_nlink > 0
 *
 * If the fs is read only, then we have to treat all cases as per #3
 * since we are unable to do any deallocation. The inode will be
 * deallocated by the next read/write node to attempt an allocation
 * in the same resource group
 *
 * We have to (at the moment) hold the inode's main lock to cover
 * the gap between unlocking the shared lock on the iopen lock and
 * taking the exclusive lock. I'd rather do a shared -> exclusive
 * conversion on the iopen lock, but we can change that later. This
 * is safe, just less efficient.
 */

static void gfs2_evict_inode(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	struct gfs2_sbd *sdp = sb->s_fs_info;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int ret;

	if (test_bit(GIF_FREE_VFS_INODE, &ip->i_flags)) {
		clear_inode(inode);
		return;
	}

	if (inode->i_nlink || sb_rdonly(sb))
		goto out;

	/*
	 * In case of an incomplete mount, gfs2_evict_inode() may be called for
	 * system files without having an active journal to write to.  In that
	 * case, skip the filesystem evict.
	 */
	if (!sdp->sd_jdesc)
		goto out;

	gfs2_holder_mark_uninitialized(&gh);
	ret = evict_should_delete(inode, &gh);
	if (ret == SHOULD_DEFER_EVICTION)
		goto out;
	if (ret == SHOULD_DELETE_DINODE)
		ret = evict_unlinked_inode(inode);
	else
		ret = evict_linked_inode(inode);

	if (gfs2_rs_active(&ip->i_res))
		gfs2_rs_deltree(&ip->i_res);

	if (gfs2_holder_initialized(&gh)) {
		glock_clear_object(ip->i_gl, ip);
		gfs2_glock_dq_uninit(&gh);
	}
	if (ret && ret != GLR_TRYFAILED && ret != -EROFS)
		fs_warn(sdp, "gfs2_evict_inode: %d\n", ret);
out:
	truncate_inode_pages_final(&inode->i_data);
	if (ip->i_qadata)
		gfs2_assert_warn(sdp, ip->i_qadata->qa_ref == 0);
	gfs2_rs_deltree(&ip->i_res);
	gfs2_ordered_del_inode(ip);
	clear_inode(inode);
	gfs2_dir_hash_inval(ip);
	if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
		struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;

		glock_clear_object(gl, ip);
		if (test_bit(HIF_HOLDER, &ip->i_iopen_gh.gh_iflags)) {
			ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
			gfs2_glock_dq(&ip->i_iopen_gh);
		}
		gfs2_glock_hold(gl);
		gfs2_holder_uninit(&ip->i_iopen_gh);
		gfs2_glock_put_eventually(gl);
	}
	if (ip->i_gl) {
		glock_clear_object(ip->i_gl, ip);
		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
		gfs2_glock_add_to_lru(ip->i_gl);
		gfs2_glock_put_eventually(ip->i_gl);
		rcu_assign_pointer(ip->i_gl, NULL);
	}
}

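/*
 * gfs2_alloc_inode and gfs2_free_inode allocate and free the in-core
 * struct gfs2_inode from the gfs2_inode_cachep slab on behalf of the VFS.
 */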
static struct inode *gfs2_alloc_inode(struct super_block *sb)
{
	struct gfs2_inode *ip;

	ip = alloc_inode_sb(sb, gfs2_inode_cachep, GFP_KERNEL);
	if (!ip)
		return NULL;
	ip->i_flags = 0;
	ip->i_gl = NULL;
	gfs2_holder_mark_uninitialized(&ip->i_iopen_gh);
	memset(&ip->i_res, 0, sizeof(ip->i_res));
	RB_CLEAR_NODE(&ip->i_res.rs_node);
	ip->i_rahead = 0;
	return &ip->i_inode;
}

static void gfs2_free_inode(struct inode *inode)
{
	kmem_cache_free(gfs2_inode_cachep, GFS2_I(inode));
}

void free_local_statfs_inodes(struct gfs2_sbd *sdp)
{
	struct local_statfs_inode *lsi, *safe;

	/* Run through the statfs inodes list to iput and free memory */
	list_for_each_entry_safe(lsi, safe, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == sdp->sd_jdesc->jd_jid)
			sdp->sd_sc_inode = NULL; /* belongs to this node */
		if (lsi->si_sc_inode)
			iput(lsi->si_sc_inode);
		list_del(&lsi->si_list);
		kfree(lsi);
	}
}

struct inode *find_local_statfs_inode(struct gfs2_sbd *sdp,
				      unsigned int index)
{
	struct local_statfs_inode *lsi;

	/* Return the local (per node) statfs inode in the
	 * sdp->sd_sc_inodes_list corresponding to the 'index'. */
	list_for_each_entry(lsi, &sdp->sd_sc_inodes_list, si_list) {
		if (lsi->si_jid == index)
			return lsi->si_sc_inode;
	}
	return NULL;
}

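/* The super_operations GFS2 registers with the VFS. */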
const struct super_operations gfs2_super_ops = {
	.alloc_inode		= gfs2_alloc_inode,
	.free_inode		= gfs2_free_inode,
	.write_inode		= gfs2_write_inode,
	.dirty_inode		= gfs2_dirty_inode,
	.evict_inode		= gfs2_evict_inode,
	.put_super		= gfs2_put_super,
	.sync_fs		= gfs2_sync_fs,
	.freeze_super		= gfs2_freeze,
	.thaw_super		= gfs2_unfreeze,
	.statfs			= gfs2_statfs,
	.drop_inode		= gfs2_drop_inode,
	.show_options		= gfs2_show_options,
};