// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/posix_acl.h>
#include <linux/security.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "util.h"
#include "trans.h"
#include "dir.h"
#include "lops.h"

struct workqueue_struct *gfs2_freeze_wq;

extern struct workqueue_struct *gfs2_control_wq;

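/*
 * gfs2_ail_error - complain about a buffer left on the AIL
 *
 * Called when an AIL buffer is found unexpectedly dirty, pinned, or
 * locked. Reports the offending buffer and glock, then requests a
 * delayed withdraw of the filesystem.
 */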
static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	fs_err(sdp,
	       "AIL buffer %p: blocknr %llu state 0x%08lx mapping %p page "
	       "state 0x%lx\n",
	       bh, (unsigned long long)bh->b_blocknr, bh->b_state,
	       bh->b_page->mapping, bh->b_page->flags);
	fs_err(sdp, "AIL glock %u:%llu mapping %p\n",
	       gl->gl_name.ln_type, gl->gl_name.ln_number,
	       gfs2_glock2aspace(gl));
	gfs2_lm(sdp, "AIL error\n");
	gfs2_withdraw_delayed(sdp);
}

/**
 * __gfs2_ail_flush - remove all buffers for a given lock from the AIL
 * @gl: the glock
 * @fsync: set when called from fsync (not all buffers will be clean)
 * @nr_revokes: Number of buffers to revoke
 *
 * None of the buffers should be dirty, locked, or pinned.
 */

static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,
			     unsigned int nr_revokes)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct list_head *head = &gl->gl_ail_list;
	struct gfs2_bufdata *bd, *tmp;
	struct buffer_head *bh;
	const unsigned long b_state = (1UL << BH_Dirty)|(1UL << BH_Pinned)|(1UL << BH_Lock);

	gfs2_log_lock(sdp);
	spin_lock(&sdp->sd_ail_lock);
	list_for_each_entry_safe_reverse(bd, tmp, head, bd_ail_gl_list) {
		if (nr_revokes == 0)
			break;
		bh = bd->bd_bh;
		if (bh->b_state & b_state) {
			if (fsync)
				continue;
			gfs2_ail_error(gl, bh);
		}
		gfs2_trans_add_revoke(sdp, bd);
		nr_revokes--;
	}
	GLOCK_BUG_ON(gl, !fsync && atomic_read(&gl->gl_ail_count));
	spin_unlock(&sdp->sd_ail_lock);
	gfs2_log_unlock(sdp);
}

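/**
 * gfs2_ail_empty_gl - remove all buffers for a given lock from the AIL
 * @gl: the glock
 *
 * Revokes whatever the glock still has on the AIL, then flushes the
 * log so that the revokes reach the journal before the glock can be
 * released.
 */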
static int gfs2_ail_empty_gl(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_trans tr;
	unsigned int revokes;
	int ret;

	revokes = atomic_read(&gl->gl_ail_count);

	if (!revokes) {
		bool have_revokes;
		bool log_in_flight;

		/*
		 * We have nothing on the AIL, but there could be revokes on
		 * the sdp revoke queue, in which case, we still want to flush
		 * the log and wait for it to finish.
		 *
		 * If the sdp revoke list is empty too, we might still have an
		 * I/O outstanding for writing revokes, so we should wait for
		 * it before returning.
		 *
		 * If none of these conditions are true, our revokes are all
		 * flushed and we can return.
		 */
		gfs2_log_lock(sdp);
		have_revokes = !list_empty(&sdp->sd_log_revokes);
		log_in_flight = atomic_read(&sdp->sd_log_in_flight);
		gfs2_log_unlock(sdp);
		if (have_revokes)
			goto flush;
		if (log_in_flight)
			log_flush_wait(sdp);
		return 0;
	}

	memset(&tr, 0, sizeof(tr));
	set_bit(TR_ONSTACK, &tr.tr_flags);
	ret = __gfs2_trans_begin(&tr, sdp, 0, revokes, _RET_IP_);
	if (ret)
		goto flush;
	__gfs2_ail_flush(gl, false, revokes);
	gfs2_trans_end(sdp);

flush:
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_EMPTY_GL);
	return 0;
}

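/**
 * gfs2_ail_flush - revoke all AIL buffers for a given lock
 * @gl: the glock
 * @fsync: set when called from fsync
 */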
void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	unsigned int revokes = atomic_read(&gl->gl_ail_count);
	int ret;

	if (!revokes)
		return;

	ret = gfs2_trans_begin(sdp, 0, revokes);
	if (ret)
		return;
	__gfs2_ail_flush(gl, fsync, revokes);
	gfs2_trans_end(sdp);
	gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_AIL_FLUSH);
}

/**
 * gfs2_rgrp_metasync - sync out the metadata of a resource group
 * @gl: the glock protecting the resource group
 *
 */

static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *metamapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;
	int error;

	filemap_fdatawrite_range(metamapping, start, end);
	error = filemap_fdatawait_range(metamapping, start, end);
	WARN_ON_ONCE(error && !gfs2_withdrawn(sdp));
	mapping_set_error(metamapping, error);
	if (error)
		gfs2_io_error(sdp);
	return error;
}

/**
 * rgrp_go_sync - sync out the metadata for this glock
 * @gl: the glock
 *
 * Called when demoting or unlocking an EX glock. We must flush
 * to disk all dirty buffers/pages relating to this glock, and must not
 * return to caller to demote/unlock the glock until I/O is complete.
 */

static int rgrp_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	int error;

	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		return 0;
	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(sdp, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_RGRP_GO_SYNC);
	error = gfs2_rgrp_metasync(gl);
	if (!error)
		error = gfs2_ail_empty_gl(gl);
	gfs2_free_clones(rgd);
	return error;
}

/**
 * rgrp_go_inval - invalidate the metadata for this glock
 * @gl: the glock
 * @flags:
 *
 * We never use LM_ST_DEFERRED with resource groups, so we should
 * always see the metadata flag set here.
 *
 */

static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = &sdp->sd_aspace;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	const unsigned bsize = sdp->sd_sb.sb_bsize;
	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
	loff_t end = PAGE_ALIGN((rgd->rd_addr + rgd->rd_length) * bsize) - 1;

	gfs2_rgrp_brelse(rgd);
	WARN_ON_ONCE(!(flags & DIO_METADATA));
	truncate_inode_pages_range(mapping, start, end);
}

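/* Dump the resource group attached to this glock, if any. */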
static void gfs2_rgrp_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			      const char *fs_id_buf)
{
	struct gfs2_rgrpd *rgd = gl->gl_object;

	if (rgd)
		gfs2_rgrp_dump(seq, rgd, fs_id_buf);
}

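/*
 * gfs2_glock2inode - safely get at the inode behind an inode glock
 *
 * Sets GIF_GLOP_PENDING under gl_lockref.lock so that the inode cannot
 * go away while a glock operation is using it. Callers must pair this
 * with gfs2_clear_glop_pending() once they are done with the inode.
 */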
static struct gfs2_inode *gfs2_glock2inode(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip;

	spin_lock(&gl->gl_lockref.lock);
	ip = gl->gl_object;
	if (ip)
		set_bit(GIF_GLOP_PENDING, &ip->i_flags);
	spin_unlock(&gl->gl_lockref.lock);
	return ip;
}

struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
{
	struct gfs2_rgrpd *rgd;

	spin_lock(&gl->gl_lockref.lock);
	rgd = gl->gl_object;
	spin_unlock(&gl->gl_lockref.lock);

	return rgd;
}

static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
	if (!ip)
		return;

	clear_bit_unlock(GIF_GLOP_PENDING, &ip->i_flags);
	wake_up_bit(&ip->i_flags, GIF_GLOP_PENDING);
}

/**
 * gfs2_inode_metasync - sync out the metadata of an inode
 * @gl: the glock protecting the inode
 *
 */
int gfs2_inode_metasync(struct gfs2_glock *gl)
{
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error;

	filemap_fdatawrite(metamapping);
	error = filemap_fdatawait(metamapping);
	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
	return error;
}

/**
 * inode_go_sync - Sync the dirty metadata of an inode
 * @gl: the glock protecting the inode
 *
 */

static int inode_go_sync(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);
	int isreg = ip && S_ISREG(ip->i_inode.i_mode);
	struct address_space *metamapping = gfs2_glock2aspace(gl);
	int error = 0, ret;

	if (isreg) {
		if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
			unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
		inode_dio_wait(&ip->i_inode);
	}
	if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
		goto out;

	GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_EXCLUSIVE);

	gfs2_log_flush(gl->gl_name.ln_sbd, gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
		       GFS2_LFC_INODE_GO_SYNC);
	filemap_fdatawrite(metamapping);
	if (isreg) {
		struct address_space *mapping = ip->i_inode.i_mapping;
		filemap_fdatawrite(mapping);
		error = filemap_fdatawait(mapping);
		mapping_set_error(mapping, error);
	}
	ret = gfs2_inode_metasync(gl);
	if (!error)
		error = ret;
	gfs2_ail_empty_gl(gl);
	/*
	 * Writeback of the data mapping may cause the dirty flag to be set
	 * so we have to clear it again here.
	 */
	smp_mb__before_atomic();
	clear_bit(GLF_DIRTY, &gl->gl_flags);

out:
	gfs2_clear_glop_pending(ip);
	return error;
}

/**
 * inode_go_inval - prepare an inode glock to be released
 * @gl: the glock
 * @flags:
 *
 * Normally we invalidate everything, but if we are moving into
 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
 * can keep hold of the metadata, since it won't have changed.
 *
 */

static void inode_go_inval(struct gfs2_glock *gl, int flags)
{
	struct gfs2_inode *ip = gfs2_glock2inode(gl);

	if (flags & DIO_METADATA) {
		struct address_space *mapping = gfs2_glock2aspace(gl);
		truncate_inode_pages(mapping, 0);
		if (ip) {
			set_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
			forget_all_cached_acls(&ip->i_inode);
			security_inode_invalidate_secctx(&ip->i_inode);
			gfs2_dir_hash_inval(ip);
		}
	}

	if (ip == GFS2_I(gl->gl_name.ln_sbd->sd_rindex)) {
		gfs2_log_flush(gl->gl_name.ln_sbd, NULL,
			       GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_INODE_GO_INVAL);
		gl->gl_name.ln_sbd->sd_rindex_uptodate = 0;
	}
	if (ip && S_ISREG(ip->i_inode.i_mode))
		truncate_inode_pages(ip->i_inode.i_mapping, 0);

	gfs2_clear_glop_pending(ip);
}

/**
 * inode_go_demote_ok - Check to see if it's ok to unlock an inode glock
 * @gl: the glock
 *
 * Returns: 1 if it's ok
 */

static int inode_go_demote_ok(const struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (sdp->sd_jindex == gl->gl_object || sdp->sd_rindex == gl->gl_object)
		return 0;

	return 1;
}

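/**
 * gfs2_dinode_in - copy an on-disk dinode into the incore inode
 * @ip: The GFS2 inode
 * @buf: The buffer containing the on-disk dinode
 *
 * Returns: 0 on success, -EIO if the dinode is inconsistent
 */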
static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf)
{
	struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	const struct gfs2_dinode *str = buf;
	struct timespec64 atime;
	u16 height, depth;
	umode_t mode = be32_to_cpu(str->di_mode);
	struct inode *inode = &ip->i_inode;
	bool is_new = inode->i_state & I_NEW;

	if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr)))
		goto corrupt;
	if (unlikely(!is_new && inode_wrong_type(inode, mode)))
		goto corrupt;
	ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino);
	inode->i_mode = mode;
	if (is_new) {
		inode->i_rdev = 0;
		switch (mode & S_IFMT) {
		case S_IFBLK:
		case S_IFCHR:
			inode->i_rdev = MKDEV(be32_to_cpu(str->di_major),
					      be32_to_cpu(str->di_minor));
			break;
		}
	}

	i_uid_write(inode, be32_to_cpu(str->di_uid));
	i_gid_write(inode, be32_to_cpu(str->di_gid));
	set_nlink(inode, be32_to_cpu(str->di_nlink));
	i_size_write(inode, be64_to_cpu(str->di_size));
	gfs2_set_inode_blocks(inode, be64_to_cpu(str->di_blocks));
	atime.tv_sec = be64_to_cpu(str->di_atime);
	atime.tv_nsec = be32_to_cpu(str->di_atime_nsec);
	if (timespec64_compare(&inode->i_atime, &atime) < 0)
		inode->i_atime = atime;
	inode->i_mtime.tv_sec = be64_to_cpu(str->di_mtime);
	inode->i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec);
	inode->i_ctime.tv_sec = be64_to_cpu(str->di_ctime);
	inode->i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec);

	ip->i_goal = be64_to_cpu(str->di_goal_meta);
	ip->i_generation = be64_to_cpu(str->di_generation);

	ip->i_diskflags = be32_to_cpu(str->di_flags);
	ip->i_eattr = be64_to_cpu(str->di_eattr);
	/* i_diskflags and i_eattr must be set before gfs2_set_inode_flags() */
	gfs2_set_inode_flags(inode);
	height = be16_to_cpu(str->di_height);
	if (unlikely(height > sdp->sd_max_height))
		goto corrupt;
	ip->i_height = (u8)height;

	depth = be16_to_cpu(str->di_depth);
	if (unlikely(depth > GFS2_DIR_MAX_DEPTH))
		goto corrupt;
	ip->i_depth = (u8)depth;
	ip->i_entries = be32_to_cpu(str->di_entries);

	if (gfs2_is_stuffed(ip) && inode->i_size > gfs2_max_stuffed_size(ip))
		goto corrupt;

	if (S_ISREG(inode->i_mode))
		gfs2_set_aops(inode);

	return 0;
corrupt:
	gfs2_consist_inode(ip);
	return -EIO;
}

/**
 * gfs2_inode_refresh - Refresh the incore copy of the dinode
 * @ip: The GFS2 inode
 *
 * Returns: errno
 */

int gfs2_inode_refresh(struct gfs2_inode *ip)
{
	struct buffer_head *dibh;
	int error;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	error = gfs2_dinode_in(ip, dibh->b_data);
	brelse(dibh);
	return error;
}

/**
 * inode_go_instantiate - read in an inode if necessary
 * @gl: The glock
 *
 * Returns: errno
 */

static int inode_go_instantiate(struct gfs2_glock *gl)
{
	struct gfs2_inode *ip = gl->gl_object;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	return gfs2_inode_refresh(ip);
}

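/*
 * inode_go_held - called each time a holder on the glock is granted;
 * waits for direct I/O to drain unless the holder asked for
 * LM_ST_DEFERRED, and resumes an interrupted truncate if one is in
 * progress and the glock is held exclusively.
 */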
static int inode_go_held(struct gfs2_holder *gh)
{
	struct gfs2_glock *gl = gh->gh_gl;
	struct gfs2_inode *ip = gl->gl_object;
	int error = 0;

	if (!ip) /* no inode to populate - read it in later */
		return 0;

	if (gh->gh_state != LM_ST_DEFERRED)
		inode_dio_wait(&ip->i_inode);

	if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
	    (gl->gl_state == LM_ST_EXCLUSIVE) &&
	    (gh->gh_state == LM_ST_EXCLUSIVE))
		error = gfs2_truncatei_resume(ip);

	return error;
}

/**
 * inode_go_dump - print information about an inode
 * @seq: The iterator
 * @gl: The glock
 * @fs_id_buf: file system id (may be empty)
 *
 */

static void inode_go_dump(struct seq_file *seq, struct gfs2_glock *gl,
			  const char *fs_id_buf)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct inode *inode = &ip->i_inode;
	unsigned long nrpages;

	if (ip == NULL)
		return;

	xa_lock_irq(&inode->i_data.i_pages);
	nrpages = inode->i_data.nrpages;
	xa_unlock_irq(&inode->i_data.i_pages);

	gfs2_print_dbg(seq, "%s I: n:%llu/%llu t:%u f:0x%02lx d:0x%08x s:%llu "
		       "p:%lu\n", fs_id_buf,
		       (unsigned long long)ip->i_no_formal_ino,
		       (unsigned long long)ip->i_no_addr,
		       IF2DT(ip->i_inode.i_mode), ip->i_flags,
		       (unsigned int)ip->i_diskflags,
		       (unsigned long long)i_size_read(inode), nrpages);
}

/**
 * freeze_go_sync - promote/demote the freeze glock
 * @gl: the glock
 */

static int freeze_go_sync(struct gfs2_glock *gl)
{
	int error = 0;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * We need to check gl_state == LM_ST_SHARED here and not gl_req ==
	 * LM_ST_EXCLUSIVE. That's because when any node does a freeze,
	 * all the nodes should have the freeze glock in SH mode and they all
	 * call do_xmote: One for EX and the others for UN. They ALL must
	 * freeze locally, and they ALL must queue freeze work. The freeze_work
	 * calls freeze_func, which tries to reacquire the freeze glock in SH,
	 * effectively waiting for the thaw on the node that holds it in EX.
	 * Once thawed, the work func acquires the freeze glock in
	 * SH and everybody goes back to thawed.
	 */
	if (gl->gl_state == LM_ST_SHARED && !gfs2_withdrawn(sdp) &&
	    !test_bit(SDF_NORECOVERY, &sdp->sd_flags)) {
		atomic_set(&sdp->sd_freeze_state, SFS_STARTING_FREEZE);
		error = freeze_super(sdp->sd_vfs);
		if (error) {
			fs_info(sdp, "GFS2: couldn't freeze filesystem: %d\n",
				error);
			if (gfs2_withdrawn(sdp)) {
				atomic_set(&sdp->sd_freeze_state, SFS_UNFROZEN);
				return 0;
			}
			gfs2_assert_withdraw(sdp, 0);
		}
		queue_work(gfs2_freeze_wq, &sdp->sd_freeze_work);
		if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
			gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_FREEZE |
				       GFS2_LFC_FREEZE_GO_SYNC);
		else /* read-only mounts */
			atomic_set(&sdp->sd_freeze_state, SFS_FROZEN);
	}
	return 0;
}

/**
 * freeze_go_xmote_bh - After promoting/demoting the freeze glock
 * @gl: the glock
 */
static int freeze_go_xmote_bh(struct gfs2_glock *gl)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
	struct gfs2_glock *j_gl = ip->i_gl;
	struct gfs2_log_header_host head;
	int error;

	if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
		j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);

		error = gfs2_find_jhead(sdp->sd_jdesc, &head, false);
		if (gfs2_assert_withdraw_delayed(sdp, !error))
			return error;
		if (gfs2_assert_withdraw_delayed(sdp, head.lh_flags &
						 GFS2_LOG_HEAD_UNMOUNT))
			return -EIO;
		sdp->sd_log_sequence = head.lh_sequence + 1;
		gfs2_log_pointers_init(sdp, head.lh_blkno);
	}
	return 0;
}

/**
 * freeze_go_demote_ok - Check if it's ok to demote the freeze glock
 * @gl: the glock
 *
 * Always returns 0
 */

static int freeze_go_demote_ok(const struct gfs2_glock *gl)
{
	return 0;
}

/**
 * iopen_go_callback - schedule the dcache entry for the inode to be deleted
 * @gl: the glock
 * @remote: true if this came from a different cluster node
 *
 * gl_lockref.lock lock is held while calling this
 */
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_inode *ip = gl->gl_object;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	if (!remote || sb_rdonly(sdp->sd_vfs))
		return;

	if (gl->gl_demote_state == LM_ST_UNLOCKED &&
	    gl->gl_state == LM_ST_SHARED && ip) {
		gl->gl_lockref.count++;
		if (!queue_delayed_work(gfs2_delete_workqueue,
					&gl->gl_delete, 0))
			gl->gl_lockref.count--;
	}
}

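/* Don't demote an iopen glock while its delete work is still queued. */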
static int iopen_go_demote_ok(const struct gfs2_glock *gl)
{
	return !gfs2_delete_work_queued(gl);
}

/**
 * inode_go_free - wake up anyone waiting for dlm's unlock ast to free it
 * @gl: glock being freed
 *
 * For now, this is only used for the journal inode glock. In withdraw
 * situations, we need to wait for the glock to be freed so that we know
 * other nodes may proceed with recovery / journal replay.
 */
static void inode_go_free(struct gfs2_glock *gl)
{
	/* Note that we cannot reference gl_object because it's already set
	 * to NULL by this point in its lifecycle. */
	if (!test_bit(GLF_FREEING, &gl->gl_flags))
		return;
	clear_bit_unlock(GLF_FREEING, &gl->gl_flags);
	wake_up_bit(&gl->gl_flags, GLF_FREEING);
}

/**
 * nondisk_go_callback - used to signal when a node did a withdraw
 * @gl: the nondisk glock
 * @remote: true if this came from a different cluster node
 *
 */
static void nondisk_go_callback(struct gfs2_glock *gl, bool remote)
{
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;

	/*
	 * Ignore the callback unless it's from another node, and it's the
	 * live lock.
	 */
	if (!remote || gl->gl_name.ln_number != GFS2_LIVE_LOCK)
		return;

	/* First order of business is to cancel the demote request. We don't
	 * really want to demote a nondisk glock. At best it's just to inform
	 * us of another node's withdraw. We'll keep it in SH mode. */
	clear_bit(GLF_DEMOTE, &gl->gl_flags);
	clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags);

	/* Ignore the unlock if we're withdrawn, unmounting, or in recovery. */
	if (test_bit(SDF_NORECOVERY, &sdp->sd_flags) ||
	    test_bit(SDF_WITHDRAWN, &sdp->sd_flags) ||
	    test_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags))
		return;

	/* We only care when a node wants us to unlock, because that means
	 * they want a journal recovered. */
	if (gl->gl_demote_state != LM_ST_UNLOCKED)
		return;

	if (sdp->sd_args.ar_spectator) {
		fs_warn(sdp, "Spectator node cannot recover journals.\n");
		return;
	}

	fs_warn(sdp, "Some node has withdrawn; checking for recovery.\n");
	set_bit(SDF_REMOTE_WITHDRAW, &sdp->sd_flags);
	/*
	 * We can't call remote_withdraw directly here or gfs2_recover_journal
	 * because this is called from the glock unlock function and the
	 * remote_withdraw needs to enqueue and dequeue the same "live" glock
	 * we were called from. So we queue it to the control work queue in
	 * lock_dlm.
	 */
	queue_delayed_work(gfs2_control_wq, &sdp->sd_control_work, 0);
}

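/*
 * The per-lock-type operation tables. gfs2_glops_list maps each
 * LM_TYPE_* value to the corresponding operations table.
 */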
const struct gfs2_glock_operations gfs2_meta_glops = {
	.go_type = LM_TYPE_META,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_inode_glops = {
	.go_sync = inode_go_sync,
	.go_inval = inode_go_inval,
	.go_demote_ok = inode_go_demote_ok,
	.go_instantiate = inode_go_instantiate,
	.go_held = inode_go_held,
	.go_dump = inode_go_dump,
	.go_type = LM_TYPE_INODE,
	.go_flags = GLOF_ASPACE | GLOF_LRU | GLOF_LVB,
	.go_free = inode_go_free,
};

const struct gfs2_glock_operations gfs2_rgrp_glops = {
	.go_sync = rgrp_go_sync,
	.go_inval = rgrp_go_inval,
	.go_instantiate = gfs2_rgrp_go_instantiate,
	.go_dump = gfs2_rgrp_go_dump,
	.go_type = LM_TYPE_RGRP,
	.go_flags = GLOF_LVB,
};

const struct gfs2_glock_operations gfs2_freeze_glops = {
	.go_sync = freeze_go_sync,
	.go_xmote_bh = freeze_go_xmote_bh,
	.go_demote_ok = freeze_go_demote_ok,
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_iopen_glops = {
	.go_type = LM_TYPE_IOPEN,
	.go_callback = iopen_go_callback,
	.go_dump = inode_go_dump,
	.go_demote_ok = iopen_go_demote_ok,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
	.go_subclass = 1,
};

const struct gfs2_glock_operations gfs2_flock_glops = {
	.go_type = LM_TYPE_FLOCK,
	.go_flags = GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_nondisk_glops = {
	.go_type = LM_TYPE_NONDISK,
	.go_flags = GLOF_NONDISK,
	.go_callback = nondisk_go_callback,
};

const struct gfs2_glock_operations gfs2_quota_glops = {
	.go_type = LM_TYPE_QUOTA,
	.go_flags = GLOF_LVB | GLOF_LRU | GLOF_NONDISK,
};

const struct gfs2_glock_operations gfs2_journal_glops = {
	.go_type = LM_TYPE_JOURNAL,
	.go_flags = GLOF_NONDISK,
};

const struct gfs2_glock_operations *gfs2_glops_list[] = {
	[LM_TYPE_META] = &gfs2_meta_glops,
	[LM_TYPE_INODE] = &gfs2_inode_glops,
	[LM_TYPE_RGRP] = &gfs2_rgrp_glops,
	[LM_TYPE_IOPEN] = &gfs2_iopen_glops,
	[LM_TYPE_FLOCK] = &gfs2_flock_glops,
	[LM_TYPE_NONDISK] = &gfs2_nondisk_glops,
	[LM_TYPE_QUOTA] = &gfs2_quota_glops,
	[LM_TYPE_JOURNAL] = &gfs2_journal_glops,
};