// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

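/* Workqueue on which the freeze work (sd_freeze_work) is run; see freeze_go_sync(). */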
struct workqueue_struct *gfs2_freeze_wq;

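/*
 * gfs2_ail_error - report a buffer that is unexpectedly still on the AIL,
 * then withdraw the filesystem.
 */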
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	fs_err(gl->gl_name.ln_sbd,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm_withdraw(gl->gl_name.ln_sbd, "AIL error\n");
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: the maximum number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

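	/*
	 * Walk the AIL list backwards, adding a revoke for each buffer.
	 * A buffer that is still dirty, pinned or locked is only expected
	 * when called from fsync, where it is simply skipped; otherwise
	 * it indicates an AIL inconsistency.
	 */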
	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}


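/*
 * gfs2_ail_empty_gl - remove all of a glock's buffers from the AIL,
 * issuing revokes for them via a small on-stack transaction.
 */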
static void gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;

	memset(&tr, 0, sizeof(tr));
	INIT_LIST_HEAD(&tr.tr_buf);
	INIT_LIST_HEAD(&tr.tr_databuf);
	tr.tr_revokes = atomic_read(&gl->gl_ail_count);

	if (!tr.tr_revokes)
		return;

	/* A shortened, inline version of gfs2_trans_begin()
	 * tr->alloced is not set since the transaction structure is
	 * on the stack */
	tr.tr_reserved = 1 + gfs2_struct2blk(sdp, tr.tr_revokes, sizeof(u64));
	tr.tr_ip = _RET_IP_;
	if (gfs2_log_reserve(sdp, tr.tr_reserved) < 0)
		return;
	WARN_ON_ONCE(current->journal_info);
	current->journal_info = &tr;

	__gfs2_ail_flush(gl, false, tr.tr_revokes);

	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
}


void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	unsigned int max_revokes = (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_log_descriptor)) / sizeof(u64);
	int ret;

	if (!revokes)
		return;

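	/*
	 * The first revoke log descriptor block holds max_revokes entries;
	 * each additional block holds
	 * (sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64) more.
	 * Round max_revokes up in whole blocks until it covers every revoke
	 * we might need to write.
	 */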
	while (revokes > max_revokes)
		max_revokes += (sdp->sd_sb.sb_bsize - sizeof(struct gfs2_meta_header)) / sizeof(u64);

	ret = gfs2_trans_begin(sdp, 0, max_revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, max_revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock.  We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to the caller to demote/unlock the glock until I/O is complete.
 */

static void rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd;
	int error;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_rgrp_brelse(rgd);
	spin_unlock(&gl->gl_lockref.lock);

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	filemap_fdatawrite_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	error = filemap_fdatawait_range(mapping, gl->gl_vm.start, gl->gl_vm.end);
	mapping_set_error(mapping, error);
	gfs2_ail_empty_gl(gl);

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	if (rgd)
		gfs2_free_clones(rgd);
	spin_unlock(&gl->gl_lockref.lock);
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);

	if (rgd)
		gfs2_rgrp_brelse(rgd);

	WARN_ON_ONCE(!(flags & DIO_METADATA));
	gfs2_assert_withdraw(sdp, !atomic_read(&gl->gl_ail_count));
	truncate_inode_pages_range(mapping, gl->gl_vm.start, gl->gl_vm.end);

	if (rgd)
		rgd->rd_flags &= ~GFS2_RDF_UPTODATE;
}

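/*
 * gfs2_glock2inode - fetch the inode attached to @gl (if any), setting
 * GIF_GLOP_PENDING so that the glock operation in progress is visible.
 * Callers must pair this with gfs2_clear_glop_pending().
 */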
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

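/* Clear GIF_GLOP_PENDING and wake up anyone waiting for it. */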
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * inode_go_sync - Sync the dirty data and/or metadata for an inode glock
 * @gl: the glock protecting the inode
 *
 */

static void inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	error = filemap_fdatawait(metamapping);
	mapping_set_error(metamapping, error);
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	gfs2_assert_withdraw(gl->gl_name.ln_sbd, !atomic_read(&gl->gl_ail_count));

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GIF_INVALID, &ip->i_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

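	/*
	 * If this is the rindex inode, the cached rindex is now stale:
	 * flush the log and clear sd_rindex_uptodate so it is re-read.
	 */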
	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

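/*
 * gfs2_dinode_in - copy the fields of an on-disk dinode into the incore
 * inode.  Returns 0 on success or -EIO if the dinode is inconsistent.
 */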
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
	ip->i_inode.i_rdev = 0;
	switch (ip->i_inode.i_mode & S_IFMT) {
	case S_IFBLK:
	case S_IFCHR:
		ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
					   be32_to_cpu(str->di_minor));
		break;
	}

	i_uid_write(&ip->i_inode, be32_to_cpu(str->di_uid));
	i_gid_write(&ip->i_inode, be32_to_cpu(str->di_gid));
	set_nlink(&ip->i_inode, be32_to_cpu(str->di_nlink));
	i_size_write(&ip->i_inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&ip->i_inode.i_atime, &atime) < 0)
		ip->i_inode.i_atime = atime;
	ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(&ip->i_inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > GFS2_MAX_META_HEIGHT))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (S_ISREG(ip->i_inode.i_mode))
		gfs2_set_aops(&ip->i_inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	clear_bit(GIF_INVALID, &ip->i_flags);

	return error;
}

/**
 * inode_go_lock - operation done after an inode lock is locked by a process
 * @gh: the glock holder
 *
 * Returns: errno
 */

static int inode_go_lock(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip || (gh->gh_flags & GL_SKIP))
		return 0;

	if (test_bit(GIF_INVALID, &ip->i_flags)) {
		error = gfs2_inode_refresh(ip);
		if (error)
			return error;
	}

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

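	/*
	 * An interrupted truncate is flagged on disk with
	 * GFS2_DIF_TRUNC_IN_PROG: queue the inode on sd_trunc_list and
	 * wake the waiter on sd_quota_wait so the truncate is completed.
	 */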
	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE)) {
		spin_lock(&sdp->sd_trunc_lock);
		if (list_empty(&ip->i_trunc_list))
			list_add(&ip->i_trunc_list, &sdp->sd_trunc_list);
		spin_unlock(&sdp->sd_trunc_lock);
		wake_up(&sdp->sd_quota_wait);
		return 1;
	}

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		  (unsigned long long)ip->i_no_formal_ino,
		  (unsigned long long)ip->i_no_addr,
		  IF2DT(ip->i_inode.i_mode), ip->i_flags,
		  (unsigned int)ip->i_diskflags,
		  (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 *
 */

static void freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

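	/*
	 * If the glock is being synced while held in the shared state and
	 * our journal is live, freeze the local filesystem, queue the
	 * freeze work, and write a freeze header to the log.
	 */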
	if (gl->gl_state == LM_ST_SHARED &&
	    test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
			       GFS2_LFC_FREEZE_GO_SYNC);
	}
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 * @gh: the glock holder
 *
 */

static int freeze_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (error)
			gfs2_consist(sdp);
		if (!(head.lh_flags & GFS2_LOG_HEAD_UNMOUNT))
			gfs2_consist(sdp);

		/*  Initialize the head of the log  */
		if (!test_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
			sdp->sd_log_sequence = head.lh_sequence + 1;
			gfs2_log_pointers_init(sdp, head.lh_blkno);
		}
	}
	return 0;
}

/**
 * freeze_go_demote_ok
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

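	/*
	 * Queue delete work for the inode, taking an extra glock reference
	 * for the work item; drop the reference again if the work was
	 * already queued.
	 */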
	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (queue_work(gfs2_delete_workqueue, &gl->gl_delete) == 0)
			gl->gl_lockref.count--;
	}
}

const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_lock = inode_go_lock,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_lock = gfs2_rgrp_go_lock,
	.go_unlock = gfs2_rgrp_go_unlock,
	.go_dump = gfs2_rgrp_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
};

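/* Glock operations indexed by lock type (LM_TYPE_*). */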
const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};